// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

7 //
8 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
9 // CHECK-RV64-NEXT: entry:
10 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
12 //
test_vmsle_vv_i8mf8_b64(vint8mf8_t op1,vint8mf8_t op2,size_t vl)13 vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
14 return vmsle(op1, op2, vl);
15 }
16
17 //
18 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
19 // CHECK-RV64-NEXT: entry:
20 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
22 //
test_vmsle_vx_i8mf8_b64(vint8mf8_t op1,int8_t op2,size_t vl)23 vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
24 return vmsle(op1, op2, vl);
25 }
26
27 //
28 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
29 // CHECK-RV64-NEXT: entry:
30 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
32 //
test_vmsle_vv_i8mf4_b32(vint8mf4_t op1,vint8mf4_t op2,size_t vl)33 vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
34 return vmsle(op1, op2, vl);
35 }
36
37 //
38 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
39 // CHECK-RV64-NEXT: entry:
40 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
41 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
42 //
test_vmsle_vx_i8mf4_b32(vint8mf4_t op1,int8_t op2,size_t vl)43 vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
44 return vmsle(op1, op2, vl);
45 }
46
47 //
48 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
49 // CHECK-RV64-NEXT: entry:
50 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
51 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
52 //
test_vmsle_vv_i8mf2_b16(vint8mf2_t op1,vint8mf2_t op2,size_t vl)53 vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
54 return vmsle(op1, op2, vl);
55 }
56
57 //
58 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
59 // CHECK-RV64-NEXT: entry:
60 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
61 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
62 //
test_vmsle_vx_i8mf2_b16(vint8mf2_t op1,int8_t op2,size_t vl)63 vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
64 return vmsle(op1, op2, vl);
65 }
66
67 //
68 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
69 // CHECK-RV64-NEXT: entry:
70 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
71 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
72 //
test_vmsle_vv_i8m1_b8(vint8m1_t op1,vint8m1_t op2,size_t vl)73 vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
74 return vmsle(op1, op2, vl);
75 }
76
77 //
78 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
79 // CHECK-RV64-NEXT: entry:
80 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
81 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
82 //
test_vmsle_vx_i8m1_b8(vint8m1_t op1,int8_t op2,size_t vl)83 vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
84 return vmsle(op1, op2, vl);
85 }
86
87 //
88 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
89 // CHECK-RV64-NEXT: entry:
90 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
92 //
test_vmsle_vv_i8m2_b4(vint8m2_t op1,vint8m2_t op2,size_t vl)93 vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
94 return vmsle(op1, op2, vl);
95 }
96
97 //
98 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
99 // CHECK-RV64-NEXT: entry:
100 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
102 //
test_vmsle_vx_i8m2_b4(vint8m2_t op1,int8_t op2,size_t vl)103 vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
104 return vmsle(op1, op2, vl);
105 }
106
107 //
108 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
112 //
test_vmsle_vv_i8m4_b2(vint8m4_t op1,vint8m4_t op2,size_t vl)113 vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
114 return vmsle(op1, op2, vl);
115 }
116
117 //
118 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
119 // CHECK-RV64-NEXT: entry:
120 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
122 //
test_vmsle_vx_i8m4_b2(vint8m4_t op1,int8_t op2,size_t vl)123 vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
124 return vmsle(op1, op2, vl);
125 }
126
127 //
128 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
129 // CHECK-RV64-NEXT: entry:
130 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
131 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
132 //
test_vmsle_vv_i8m8_b1(vint8m8_t op1,vint8m8_t op2,size_t vl)133 vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
134 return vmsle(op1, op2, vl);
135 }
136
137 //
138 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
139 // CHECK-RV64-NEXT: entry:
140 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
141 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
142 //
test_vmsle_vx_i8m8_b1(vint8m8_t op1,int8_t op2,size_t vl)143 vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
144 return vmsle(op1, op2, vl);
145 }
146
147 //
148 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
149 // CHECK-RV64-NEXT: entry:
150 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
151 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
152 //
test_vmsle_vv_i16mf4_b64(vint16mf4_t op1,vint16mf4_t op2,size_t vl)153 vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
154 size_t vl) {
155 return vmsle(op1, op2, vl);
156 }
157
158 //
159 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
160 // CHECK-RV64-NEXT: entry:
161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
162 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
163 //
test_vmsle_vx_i16mf4_b64(vint16mf4_t op1,int16_t op2,size_t vl)164 vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
165 return vmsle(op1, op2, vl);
166 }
167
168 //
169 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
170 // CHECK-RV64-NEXT: entry:
171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
172 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
173 //
test_vmsle_vv_i16mf2_b32(vint16mf2_t op1,vint16mf2_t op2,size_t vl)174 vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
175 size_t vl) {
176 return vmsle(op1, op2, vl);
177 }
178
179 //
180 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
181 // CHECK-RV64-NEXT: entry:
182 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
183 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
184 //
test_vmsle_vx_i16mf2_b32(vint16mf2_t op1,int16_t op2,size_t vl)185 vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
186 return vmsle(op1, op2, vl);
187 }
188
189 //
190 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
191 // CHECK-RV64-NEXT: entry:
192 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
193 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
194 //
test_vmsle_vv_i16m1_b16(vint16m1_t op1,vint16m1_t op2,size_t vl)195 vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
196 return vmsle(op1, op2, vl);
197 }
198
199 //
200 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
201 // CHECK-RV64-NEXT: entry:
202 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
203 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
204 //
test_vmsle_vx_i16m1_b16(vint16m1_t op1,int16_t op2,size_t vl)205 vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
206 return vmsle(op1, op2, vl);
207 }
208
209 //
210 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
211 // CHECK-RV64-NEXT: entry:
212 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
213 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
214 //
test_vmsle_vv_i16m2_b8(vint16m2_t op1,vint16m2_t op2,size_t vl)215 vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
216 return vmsle(op1, op2, vl);
217 }
218
219 //
220 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
221 // CHECK-RV64-NEXT: entry:
222 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
223 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
224 //
test_vmsle_vx_i16m2_b8(vint16m2_t op1,int16_t op2,size_t vl)225 vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
226 return vmsle(op1, op2, vl);
227 }
228
229 //
230 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
231 // CHECK-RV64-NEXT: entry:
232 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
233 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
234 //
test_vmsle_vv_i16m4_b4(vint16m4_t op1,vint16m4_t op2,size_t vl)235 vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
236 return vmsle(op1, op2, vl);
237 }
238
239 //
240 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
241 // CHECK-RV64-NEXT: entry:
242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
244 //
test_vmsle_vx_i16m4_b4(vint16m4_t op1,int16_t op2,size_t vl)245 vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
246 return vmsle(op1, op2, vl);
247 }
248
249 //
250 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
251 // CHECK-RV64-NEXT: entry:
252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
254 //
test_vmsle_vv_i16m8_b2(vint16m8_t op1,vint16m8_t op2,size_t vl)255 vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
256 return vmsle(op1, op2, vl);
257 }
258
259 //
260 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
261 // CHECK-RV64-NEXT: entry:
262 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
263 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
264 //
test_vmsle_vx_i16m8_b2(vint16m8_t op1,int16_t op2,size_t vl)265 vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
266 return vmsle(op1, op2, vl);
267 }
268
269 //
270 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
271 // CHECK-RV64-NEXT: entry:
272 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
273 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
274 //
test_vmsle_vv_i32mf2_b64(vint32mf2_t op1,vint32mf2_t op2,size_t vl)275 vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
276 size_t vl) {
277 return vmsle(op1, op2, vl);
278 }
279
280 //
281 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
282 // CHECK-RV64-NEXT: entry:
283 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
284 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
285 //
test_vmsle_vx_i32mf2_b64(vint32mf2_t op1,int32_t op2,size_t vl)286 vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
287 return vmsle(op1, op2, vl);
288 }
289
290 //
291 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
292 // CHECK-RV64-NEXT: entry:
293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
294 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
295 //
test_vmsle_vv_i32m1_b32(vint32m1_t op1,vint32m1_t op2,size_t vl)296 vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
297 return vmsle(op1, op2, vl);
298 }
299
300 //
301 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
302 // CHECK-RV64-NEXT: entry:
303 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
304 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
305 //
test_vmsle_vx_i32m1_b32(vint32m1_t op1,int32_t op2,size_t vl)306 vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
307 return vmsle(op1, op2, vl);
308 }
309
310 //
311 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
312 // CHECK-RV64-NEXT: entry:
313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
314 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
315 //
test_vmsle_vv_i32m2_b16(vint32m2_t op1,vint32m2_t op2,size_t vl)316 vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
317 return vmsle(op1, op2, vl);
318 }
319
320 //
321 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
322 // CHECK-RV64-NEXT: entry:
323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
324 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
325 //
test_vmsle_vx_i32m2_b16(vint32m2_t op1,int32_t op2,size_t vl)326 vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
327 return vmsle(op1, op2, vl);
328 }
329
330 //
331 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
332 // CHECK-RV64-NEXT: entry:
333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
334 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
335 //
test_vmsle_vv_i32m4_b8(vint32m4_t op1,vint32m4_t op2,size_t vl)336 vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
337 return vmsle(op1, op2, vl);
338 }
339
340 //
341 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
342 // CHECK-RV64-NEXT: entry:
343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
344 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
345 //
test_vmsle_vx_i32m4_b8(vint32m4_t op1,int32_t op2,size_t vl)346 vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
347 return vmsle(op1, op2, vl);
348 }
349
350 //
351 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
352 // CHECK-RV64-NEXT: entry:
353 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
354 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
355 //
test_vmsle_vv_i32m8_b4(vint32m8_t op1,vint32m8_t op2,size_t vl)356 vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
357 return vmsle(op1, op2, vl);
358 }
359
360 //
361 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
362 // CHECK-RV64-NEXT: entry:
363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
365 //
test_vmsle_vx_i32m8_b4(vint32m8_t op1,int32_t op2,size_t vl)366 vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
367 return vmsle(op1, op2, vl);
368 }
369
370 //
371 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
372 // CHECK-RV64-NEXT: entry:
373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
374 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
375 //
test_vmsle_vv_i64m1_b64(vint64m1_t op1,vint64m1_t op2,size_t vl)376 vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
377 return vmsle(op1, op2, vl);
378 }
379
380 //
381 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
382 // CHECK-RV64-NEXT: entry:
383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
384 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
385 //
test_vmsle_vx_i64m1_b64(vint64m1_t op1,int64_t op2,size_t vl)386 vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
387 return vmsle(op1, op2, vl);
388 }
389
390 //
391 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
392 // CHECK-RV64-NEXT: entry:
393 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
394 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
395 //
test_vmsle_vv_i64m2_b32(vint64m2_t op1,vint64m2_t op2,size_t vl)396 vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
397 return vmsle(op1, op2, vl);
398 }
399
400 //
401 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
402 // CHECK-RV64-NEXT: entry:
403 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
404 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
405 //
test_vmsle_vx_i64m2_b32(vint64m2_t op1,int64_t op2,size_t vl)406 vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
407 return vmsle(op1, op2, vl);
408 }
409
410 //
411 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
412 // CHECK-RV64-NEXT: entry:
413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
414 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
415 //
test_vmsle_vv_i64m4_b16(vint64m4_t op1,vint64m4_t op2,size_t vl)416 vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
417 return vmsle(op1, op2, vl);
418 }
419
420 //
421 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
422 // CHECK-RV64-NEXT: entry:
423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
424 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
425 //
test_vmsle_vx_i64m4_b16(vint64m4_t op1,int64_t op2,size_t vl)426 vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
427 return vmsle(op1, op2, vl);
428 }
429
430 //
431 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
432 // CHECK-RV64-NEXT: entry:
433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
434 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
435 //
test_vmsle_vv_i64m8_b8(vint64m8_t op1,vint64m8_t op2,size_t vl)436 vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
437 return vmsle(op1, op2, vl);
438 }
439
440 //
441 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
442 // CHECK-RV64-NEXT: entry:
443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
444 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
445 //
test_vmsle_vx_i64m8_b8(vint64m8_t op1,int64_t op2,size_t vl)446 vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
447 return vmsle(op1, op2, vl);
448 }
449
450 //
451 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64(
452 // CHECK-RV64-NEXT: entry:
453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
454 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
455 //
test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)456 vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
457 size_t vl) {
458 return vmsleu(op1, op2, vl);
459 }
460
461 //
462 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
463 // CHECK-RV64-NEXT: entry:
464 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
465 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
466 //
test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1,uint8_t op2,size_t vl)467 vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
468 return vmsleu(op1, op2, vl);
469 }
470
471 //
472 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
473 // CHECK-RV64-NEXT: entry:
474 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
475 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
476 //
test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)477 vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
478 size_t vl) {
479 return vmsleu(op1, op2, vl);
480 }
481
482 //
483 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
484 // CHECK-RV64-NEXT: entry:
485 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
486 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
487 //
test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1,uint8_t op2,size_t vl)488 vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
489 return vmsleu(op1, op2, vl);
490 }
491
492 //
493 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
494 // CHECK-RV64-NEXT: entry:
495 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
497 //
test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)498 vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
499 size_t vl) {
500 return vmsleu(op1, op2, vl);
501 }
502
503 //
504 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
505 // CHECK-RV64-NEXT: entry:
506 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
508 //
test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1,uint8_t op2,size_t vl)509 vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
510 return vmsleu(op1, op2, vl);
511 }
512
513 //
514 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
515 // CHECK-RV64-NEXT: entry:
516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
517 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
518 //
test_vmsleu_vv_u8m1_b8(vuint8m1_t op1,vuint8m1_t op2,size_t vl)519 vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
520 return vmsleu(op1, op2, vl);
521 }
522
523 //
524 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
525 // CHECK-RV64-NEXT: entry:
526 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
527 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
528 //
test_vmsleu_vx_u8m1_b8(vuint8m1_t op1,uint8_t op2,size_t vl)529 vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
530 return vmsleu(op1, op2, vl);
531 }
532
533 //
534 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
535 // CHECK-RV64-NEXT: entry:
536 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
537 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
538 //
test_vmsleu_vv_u8m2_b4(vuint8m2_t op1,vuint8m2_t op2,size_t vl)539 vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
540 return vmsleu(op1, op2, vl);
541 }
542
543 //
544 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
545 // CHECK-RV64-NEXT: entry:
546 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
547 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
548 //
test_vmsleu_vx_u8m2_b4(vuint8m2_t op1,uint8_t op2,size_t vl)549 vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
550 return vmsleu(op1, op2, vl);
551 }
552
553 //
554 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
555 // CHECK-RV64-NEXT: entry:
556 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
557 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
558 //
test_vmsleu_vv_u8m4_b2(vuint8m4_t op1,vuint8m4_t op2,size_t vl)559 vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
560 return vmsleu(op1, op2, vl);
561 }
562
563 //
564 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
565 // CHECK-RV64-NEXT: entry:
566 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
567 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
568 //
test_vmsleu_vx_u8m4_b2(vuint8m4_t op1,uint8_t op2,size_t vl)569 vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
570 return vmsleu(op1, op2, vl);
571 }
572
573 //
574 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
575 // CHECK-RV64-NEXT: entry:
576 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
577 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
578 //
test_vmsleu_vv_u8m8_b1(vuint8m8_t op1,vuint8m8_t op2,size_t vl)579 vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
580 return vmsleu(op1, op2, vl);
581 }
582
583 //
584 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
585 // CHECK-RV64-NEXT: entry:
586 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
587 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
588 //
test_vmsleu_vx_u8m8_b1(vuint8m8_t op1,uint8_t op2,size_t vl)589 vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
590 return vmsleu(op1, op2, vl);
591 }
592
593 //
594 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
595 // CHECK-RV64-NEXT: entry:
596 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
597 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
598 //
test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)599 vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
600 size_t vl) {
601 return vmsleu(op1, op2, vl);
602 }
603
604 //
605 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
606 // CHECK-RV64-NEXT: entry:
607 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
608 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
609 //
test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1,uint16_t op2,size_t vl)610 vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
611 return vmsleu(op1, op2, vl);
612 }
613
614 //
615 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
616 // CHECK-RV64-NEXT: entry:
617 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
618 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
619 //
test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)620 vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
621 size_t vl) {
622 return vmsleu(op1, op2, vl);
623 }
624
625 //
626 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
627 // CHECK-RV64-NEXT: entry:
628 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
629 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
630 //
test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1,uint16_t op2,size_t vl)631 vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
632 return vmsleu(op1, op2, vl);
633 }
634
635 //
636 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
637 // CHECK-RV64-NEXT: entry:
638 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
639 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
640 //
vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
645
646 //
647 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
648 // CHECK-RV64-NEXT: entry:
649 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
650 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
651 //
vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
655
656 //
657 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
658 // CHECK-RV64-NEXT: entry:
659 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
660 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
661 //
vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
665
666 //
667 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
668 // CHECK-RV64-NEXT: entry:
669 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
670 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
671 //
vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
675
676 //
677 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
678 // CHECK-RV64-NEXT: entry:
679 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
680 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
681 //
vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
685
686 //
687 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
688 // CHECK-RV64-NEXT: entry:
689 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
690 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
691 //
vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
695
696 //
697 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
698 // CHECK-RV64-NEXT: entry:
699 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
700 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
701 //
vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
705
706 //
707 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
708 // CHECK-RV64-NEXT: entry:
709 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
710 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
711 //
vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
715
716 //
717 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
718 // CHECK-RV64-NEXT: entry:
719 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
720 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
721 //
vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
                                    size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
726
727 //
728 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
729 // CHECK-RV64-NEXT: entry:
730 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
731 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
732 //
vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
736
737 //
738 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
739 // CHECK-RV64-NEXT: entry:
740 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
741 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
742 //
vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
747
748 //
749 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
750 // CHECK-RV64-NEXT: entry:
751 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
752 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
753 //
vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
757
758 //
759 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
760 // CHECK-RV64-NEXT: entry:
761 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
762 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
763 //
vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
768
769 //
770 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
771 // CHECK-RV64-NEXT: entry:
772 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
773 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
774 //
vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
778
779 //
780 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
781 // CHECK-RV64-NEXT: entry:
782 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
783 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
784 //
vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
788
789 //
790 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
791 // CHECK-RV64-NEXT: entry:
792 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
793 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
794 //
vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
798
799 //
800 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
801 // CHECK-RV64-NEXT: entry:
802 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
803 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
804 //
vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
808
809 //
810 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
811 // CHECK-RV64-NEXT: entry:
812 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
813 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
814 //
vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
818
819 //
820 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
821 // CHECK-RV64-NEXT: entry:
822 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
823 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
824 //
vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
829
830 //
831 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
832 // CHECK-RV64-NEXT: entry:
833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
834 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
835 //
vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
839
840 //
841 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
842 // CHECK-RV64-NEXT: entry:
843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
844 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
845 //
vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
850
851 //
852 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
853 // CHECK-RV64-NEXT: entry:
854 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
855 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
856 //
vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
860
861 //
862 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
863 // CHECK-RV64-NEXT: entry:
864 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
865 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
866 //
vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
                                   size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
871
872 //
873 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
874 // CHECK-RV64-NEXT: entry:
875 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
876 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
877 //
vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
881
882 //
883 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
884 // CHECK-RV64-NEXT: entry:
885 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
886 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
887 //
vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vv form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
891
892 //
893 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
894 // CHECK-RV64-NEXT: entry:
895 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
896 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
897 //
vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  // Overloaded vmsleu (unsigned <=), vx form; IR pinned by CHECK above.
  return vmsleu(op1, op2, vl);
}
901
902 //
903 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
904 // CHECK-RV64-NEXT: entry:
905 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
906 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
907 //
vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
912
913 //
914 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
915 // CHECK-RV64-NEXT: entry:
916 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
917 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
918 //
vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                    vint8mf8_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
923
924 //
925 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
926 // CHECK-RV64-NEXT: entry:
927 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
928 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
929 //
vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
934
935 //
936 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
937 // CHECK-RV64-NEXT: entry:
938 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
939 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
940 //
vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vint8mf4_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
945
946 //
947 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
948 // CHECK-RV64-NEXT: entry:
949 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
950 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
951 //
vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
956
957 //
958 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
959 // CHECK-RV64-NEXT: entry:
960 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
961 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
962 //
vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vint8mf2_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
967
968 //
969 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
970 // CHECK-RV64-NEXT: entry:
971 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
972 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
973 //
vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                 vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
978
979 //
980 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
981 // CHECK-RV64-NEXT: entry:
982 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
983 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
984 //
vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                 vint8m1_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
989
990 //
991 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
992 // CHECK-RV64-NEXT: entry:
993 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
994 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
995 //
vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                 vint8m2_t op1, vint8m2_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1000
1001 //
1002 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
1003 // CHECK-RV64-NEXT: entry:
1004 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1005 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1006 //
vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                 vint8m2_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1011
1012 //
1013 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
1014 // CHECK-RV64-NEXT: entry:
1015 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1016 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1017 //
vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
                                 vint8m4_t op1, vint8m4_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1022
1023 //
1024 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
1025 // CHECK-RV64-NEXT: entry:
1026 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1027 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1028 //
vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
                                 vint8m4_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1033
1034 //
1035 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
1036 // CHECK-RV64-NEXT: entry:
1037 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1038 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1039 //
vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
                                 vint8m8_t op1, vint8m8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1044
1045 //
1046 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
1047 // CHECK-RV64-NEXT: entry:
1048 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1049 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1050 //
vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
                                 vint8m8_t op1, int8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1055
1056 //
1057 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
1058 // CHECK-RV64-NEXT: entry:
1059 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1060 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1061 //
vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vint16mf4_t op1, vint16mf4_t op2,
                                     size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1067
1068 //
1069 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
1070 // CHECK-RV64-NEXT: entry:
1071 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1072 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1073 //
vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vint16mf4_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1078
1079 //
1080 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
1081 // CHECK-RV64-NEXT: entry:
1082 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1083 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1084 //
vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                     vint16mf2_t op1, vint16mf2_t op2,
                                     size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1090
1091 //
1092 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
1093 // CHECK-RV64-NEXT: entry:
1094 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1095 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1096 //
vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                     vint16mf2_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1101
1102 //
1103 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
1104 // CHECK-RV64-NEXT: entry:
1105 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1106 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1107 //
vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vint16m1_t op1, vint16m1_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1112
1113 //
1114 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
1115 // CHECK-RV64-NEXT: entry:
1116 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1117 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1118 //
vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vint16m1_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1123
1124 //
1125 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
1126 // CHECK-RV64-NEXT: entry:
1127 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1128 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1129 //
vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vint16m2_t op1, vint16m2_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1134
1135 //
1136 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
1137 // CHECK-RV64-NEXT: entry:
1138 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1139 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1140 //
vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vint16m2_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1145
1146 //
1147 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
1148 // CHECK-RV64-NEXT: entry:
1149 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1150 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1151 //
vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                  vint16m4_t op1, vint16m4_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1156
1157 //
1158 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
1159 // CHECK-RV64-NEXT: entry:
1160 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1161 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1162 //
vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                  vint16m4_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1167
1168 //
1169 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
1170 // CHECK-RV64-NEXT: entry:
1171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1172 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1173 //
vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
                                  vint16m8_t op1, vint16m8_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1178
1179 //
1180 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
1181 // CHECK-RV64-NEXT: entry:
1182 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1183 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1184 //
vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
                                  vint16m8_t op1, int16_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1189
1190 //
1191 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
1192 // CHECK-RV64-NEXT: entry:
1193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1194 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1195 //
vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vint32mf2_t op1, vint32mf2_t op2,
                                     size_t vl) {
  // Masked overloaded vmsle (signed <=), vv form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1201
1202 //
1203 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
1204 // CHECK-RV64-NEXT: entry:
1205 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1206 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1207 //
vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vint32mf2_t op1, int32_t op2, size_t vl) {
  // Masked overloaded vmsle (signed <=), vx form; IR pinned by CHECK above.
  return vmsle(mask, maskedoff, op1, op2, vl);
}
1212
1213 //
1214 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
1215 // CHECK-RV64-NEXT: entry:
1216 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1217 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1218 //
test_vmsle_vv_i32m1_b32_m(vbool32_t mask,vbool32_t maskedoff,vint32m1_t op1,vint32m1_t op2,size_t vl)1219 vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
1220 vint32m1_t op1, vint32m1_t op2, size_t vl) {
1221 return vmsle(mask, maskedoff, op1, op2, vl);
1222 }
1223
1224 //
1225 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
1226 // CHECK-RV64-NEXT: entry:
1227 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1228 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1229 //
test_vmsle_vx_i32m1_b32_m(vbool32_t mask,vbool32_t maskedoff,vint32m1_t op1,int32_t op2,size_t vl)1230 vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
1231 vint32m1_t op1, int32_t op2, size_t vl) {
1232 return vmsle(mask, maskedoff, op1, op2, vl);
1233 }
1234
1235 //
1236 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
1237 // CHECK-RV64-NEXT: entry:
1238 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1239 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1240 //
test_vmsle_vv_i32m2_b16_m(vbool16_t mask,vbool16_t maskedoff,vint32m2_t op1,vint32m2_t op2,size_t vl)1241 vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
1242 vint32m2_t op1, vint32m2_t op2, size_t vl) {
1243 return vmsle(mask, maskedoff, op1, op2, vl);
1244 }
1245
1246 //
1247 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
1248 // CHECK-RV64-NEXT: entry:
1249 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1250 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1251 //
test_vmsle_vx_i32m2_b16_m(vbool16_t mask,vbool16_t maskedoff,vint32m2_t op1,int32_t op2,size_t vl)1252 vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
1253 vint32m2_t op1, int32_t op2, size_t vl) {
1254 return vmsle(mask, maskedoff, op1, op2, vl);
1255 }
1256
1257 //
1258 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
1259 // CHECK-RV64-NEXT: entry:
1260 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1261 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1262 //
test_vmsle_vv_i32m4_b8_m(vbool8_t mask,vbool8_t maskedoff,vint32m4_t op1,vint32m4_t op2,size_t vl)1263 vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
1264 vint32m4_t op1, vint32m4_t op2, size_t vl) {
1265 return vmsle(mask, maskedoff, op1, op2, vl);
1266 }
1267
1268 //
1269 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
1270 // CHECK-RV64-NEXT: entry:
1271 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1272 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1273 //
test_vmsle_vx_i32m4_b8_m(vbool8_t mask,vbool8_t maskedoff,vint32m4_t op1,int32_t op2,size_t vl)1274 vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
1275 vint32m4_t op1, int32_t op2, size_t vl) {
1276 return vmsle(mask, maskedoff, op1, op2, vl);
1277 }
1278
1279 //
1280 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
1281 // CHECK-RV64-NEXT: entry:
1282 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1283 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1284 //
test_vmsle_vv_i32m8_b4_m(vbool4_t mask,vbool4_t maskedoff,vint32m8_t op1,vint32m8_t op2,size_t vl)1285 vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
1286 vint32m8_t op1, vint32m8_t op2, size_t vl) {
1287 return vmsle(mask, maskedoff, op1, op2, vl);
1288 }
1289
1290 //
1291 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
1292 // CHECK-RV64-NEXT: entry:
1293 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1294 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1295 //
test_vmsle_vx_i32m8_b4_m(vbool4_t mask,vbool4_t maskedoff,vint32m8_t op1,int32_t op2,size_t vl)1296 vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
1297 vint32m8_t op1, int32_t op2, size_t vl) {
1298 return vmsle(mask, maskedoff, op1, op2, vl);
1299 }
1300
// NOTE(review): Autogenerated tests — masked vmsle (signed <=) for every i64
// LMUL (m1, m2, m4, m8), vv and vx forms. Each pins the overloaded call to the
// matching @llvm.riscv.vmsle.mask.nxv<N>i64.* intrinsic. Regenerate CHECK
// lines with update_cc_test_checks.py only; do not hand-edit.
1301 //
1302 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
1303 // CHECK-RV64-NEXT: entry:
1304 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1305 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1306 //
test_vmsle_vv_i64m1_b64_m(vbool64_t mask,vbool64_t maskedoff,vint64m1_t op1,vint64m1_t op2,size_t vl)1307 vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
1308 vint64m1_t op1, vint64m1_t op2, size_t vl) {
1309 return vmsle(mask, maskedoff, op1, op2, vl);
1310 }
1311
1312 //
1313 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
1314 // CHECK-RV64-NEXT: entry:
1315 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1316 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1317 //
test_vmsle_vx_i64m1_b64_m(vbool64_t mask,vbool64_t maskedoff,vint64m1_t op1,int64_t op2,size_t vl)1318 vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
1319 vint64m1_t op1, int64_t op2, size_t vl) {
1320 return vmsle(mask, maskedoff, op1, op2, vl);
1321 }
1322
1323 //
1324 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
1325 // CHECK-RV64-NEXT: entry:
1326 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1327 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1328 //
test_vmsle_vv_i64m2_b32_m(vbool32_t mask,vbool32_t maskedoff,vint64m2_t op1,vint64m2_t op2,size_t vl)1329 vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
1330 vint64m2_t op1, vint64m2_t op2, size_t vl) {
1331 return vmsle(mask, maskedoff, op1, op2, vl);
1332 }
1333
1334 //
1335 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
1336 // CHECK-RV64-NEXT: entry:
1337 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1338 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1339 //
test_vmsle_vx_i64m2_b32_m(vbool32_t mask,vbool32_t maskedoff,vint64m2_t op1,int64_t op2,size_t vl)1340 vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
1341 vint64m2_t op1, int64_t op2, size_t vl) {
1342 return vmsle(mask, maskedoff, op1, op2, vl);
1343 }
1344
1345 //
1346 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
1347 // CHECK-RV64-NEXT: entry:
1348 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1349 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1350 //
test_vmsle_vv_i64m4_b16_m(vbool16_t mask,vbool16_t maskedoff,vint64m4_t op1,vint64m4_t op2,size_t vl)1351 vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
1352 vint64m4_t op1, vint64m4_t op2, size_t vl) {
1353 return vmsle(mask, maskedoff, op1, op2, vl);
1354 }
1355
1356 //
1357 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
1358 // CHECK-RV64-NEXT: entry:
1359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1360 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1361 //
test_vmsle_vx_i64m4_b16_m(vbool16_t mask,vbool16_t maskedoff,vint64m4_t op1,int64_t op2,size_t vl)1362 vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
1363 vint64m4_t op1, int64_t op2, size_t vl) {
1364 return vmsle(mask, maskedoff, op1, op2, vl);
1365 }
1366
1367 //
1368 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
1369 // CHECK-RV64-NEXT: entry:
1370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1371 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1372 //
test_vmsle_vv_i64m8_b8_m(vbool8_t mask,vbool8_t maskedoff,vint64m8_t op1,vint64m8_t op2,size_t vl)1373 vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
1374 vint64m8_t op1, vint64m8_t op2, size_t vl) {
1375 return vmsle(mask, maskedoff, op1, op2, vl);
1376 }
1377
1378 //
1379 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
1380 // CHECK-RV64-NEXT: entry:
1381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1382 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1383 //
test_vmsle_vx_i64m8_b8_m(vbool8_t mask,vbool8_t maskedoff,vint64m8_t op1,int64_t op2,size_t vl)1384 vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
1385 vint64m8_t op1, int64_t op2, size_t vl) {
1386 return vmsle(mask, maskedoff, op1, op2, vl);
1387 }
1388
// NOTE(review): Autogenerated tests — masked vmsleu (unsigned <=) for every
// u8 LMUL (mf8, mf4, mf2, m1, m2, m4, m8), vv and vx forms. Each pins the
// overloaded vmsleu(mask, maskedoff, op1, op2, vl) call to the matching
// @llvm.riscv.vmsleu.mask.nxv<N>i8.* intrinsic. Regenerate CHECK lines with
// update_cc_test_checks.py only; do not hand-edit.
1389 //
1390 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
1391 // CHECK-RV64-NEXT: entry:
1392 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1393 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1394 //
test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)1395 vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
1396 vuint8mf8_t op1, vuint8mf8_t op2,
1397 size_t vl) {
1398 return vmsleu(mask, maskedoff, op1, op2, vl);
1399 }
1400
1401 //
1402 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
1403 // CHECK-RV64-NEXT: entry:
1404 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1405 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1406 //
test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint8mf8_t op1,uint8_t op2,size_t vl)1407 vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
1408 vuint8mf8_t op1, uint8_t op2, size_t vl) {
1409 return vmsleu(mask, maskedoff, op1, op2, vl);
1410 }
1411
1412 //
1413 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
1414 // CHECK-RV64-NEXT: entry:
1415 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1416 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1417 //
test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask,vbool32_t maskedoff,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)1418 vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
1419 vuint8mf4_t op1, vuint8mf4_t op2,
1420 size_t vl) {
1421 return vmsleu(mask, maskedoff, op1, op2, vl);
1422 }
1423
1424 //
1425 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
1426 // CHECK-RV64-NEXT: entry:
1427 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1428 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1429 //
test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask,vbool32_t maskedoff,vuint8mf4_t op1,uint8_t op2,size_t vl)1430 vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
1431 vuint8mf4_t op1, uint8_t op2, size_t vl) {
1432 return vmsleu(mask, maskedoff, op1, op2, vl);
1433 }
1434
1435 //
1436 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
1437 // CHECK-RV64-NEXT: entry:
1438 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1439 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1440 //
test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask,vbool16_t maskedoff,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)1441 vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
1442 vuint8mf2_t op1, vuint8mf2_t op2,
1443 size_t vl) {
1444 return vmsleu(mask, maskedoff, op1, op2, vl);
1445 }
1446
1447 //
1448 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
1449 // CHECK-RV64-NEXT: entry:
1450 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1451 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1452 //
test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask,vbool16_t maskedoff,vuint8mf2_t op1,uint8_t op2,size_t vl)1453 vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
1454 vuint8mf2_t op1, uint8_t op2, size_t vl) {
1455 return vmsleu(mask, maskedoff, op1, op2, vl);
1456 }
1457
1458 //
1459 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
1460 // CHECK-RV64-NEXT: entry:
1461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1462 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1463 //
test_vmsleu_vv_u8m1_b8_m(vbool8_t mask,vbool8_t maskedoff,vuint8m1_t op1,vuint8m1_t op2,size_t vl)1464 vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
1465 vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
1466 return vmsleu(mask, maskedoff, op1, op2, vl);
1467 }
1468
1469 //
1470 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
1471 // CHECK-RV64-NEXT: entry:
1472 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1473 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1474 //
test_vmsleu_vx_u8m1_b8_m(vbool8_t mask,vbool8_t maskedoff,vuint8m1_t op1,uint8_t op2,size_t vl)1475 vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
1476 vuint8m1_t op1, uint8_t op2, size_t vl) {
1477 return vmsleu(mask, maskedoff, op1, op2, vl);
1478 }
1479
1480 //
1481 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
1482 // CHECK-RV64-NEXT: entry:
1483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1484 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1485 //
test_vmsleu_vv_u8m2_b4_m(vbool4_t mask,vbool4_t maskedoff,vuint8m2_t op1,vuint8m2_t op2,size_t vl)1486 vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
1487 vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
1488 return vmsleu(mask, maskedoff, op1, op2, vl);
1489 }
1490
1491 //
1492 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
1493 // CHECK-RV64-NEXT: entry:
1494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1495 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1496 //
test_vmsleu_vx_u8m2_b4_m(vbool4_t mask,vbool4_t maskedoff,vuint8m2_t op1,uint8_t op2,size_t vl)1497 vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
1498 vuint8m2_t op1, uint8_t op2, size_t vl) {
1499 return vmsleu(mask, maskedoff, op1, op2, vl);
1500 }
1501
1502 //
1503 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
1504 // CHECK-RV64-NEXT: entry:
1505 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1506 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1507 //
test_vmsleu_vv_u8m4_b2_m(vbool2_t mask,vbool2_t maskedoff,vuint8m4_t op1,vuint8m4_t op2,size_t vl)1508 vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
1509 vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
1510 return vmsleu(mask, maskedoff, op1, op2, vl);
1511 }
1512
1513 //
1514 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
1515 // CHECK-RV64-NEXT: entry:
1516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1517 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1518 //
test_vmsleu_vx_u8m4_b2_m(vbool2_t mask,vbool2_t maskedoff,vuint8m4_t op1,uint8_t op2,size_t vl)1519 vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
1520 vuint8m4_t op1, uint8_t op2, size_t vl) {
1521 return vmsleu(mask, maskedoff, op1, op2, vl);
1522 }
1523
1524 //
1525 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
1526 // CHECK-RV64-NEXT: entry:
1527 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1528 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1529 //
test_vmsleu_vv_u8m8_b1_m(vbool1_t mask,vbool1_t maskedoff,vuint8m8_t op1,vuint8m8_t op2,size_t vl)1530 vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
1531 vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
1532 return vmsleu(mask, maskedoff, op1, op2, vl);
1533 }
1534
1535 //
1536 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
1537 // CHECK-RV64-NEXT: entry:
1538 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1539 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1540 //
test_vmsleu_vx_u8m8_b1_m(vbool1_t mask,vbool1_t maskedoff,vuint8m8_t op1,uint8_t op2,size_t vl)1541 vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
1542 vuint8m8_t op1, uint8_t op2, size_t vl) {
1543 return vmsleu(mask, maskedoff, op1, op2, vl);
1544 }
1545
// NOTE(review): Autogenerated tests — masked vmsleu (unsigned <=) for every
// u16 LMUL (mf4, mf2, m1, m2, m4, m8), vv and vx forms. Each pins the
// overloaded call to the matching @llvm.riscv.vmsleu.mask.nxv<N>i16.*
// intrinsic. Regenerate CHECK lines with update_cc_test_checks.py only.
1546 //
1547 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
1548 // CHECK-RV64-NEXT: entry:
1549 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1550 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1551 //
test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)1552 vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
1553 vuint16mf4_t op1, vuint16mf4_t op2,
1554 size_t vl) {
1555 return vmsleu(mask, maskedoff, op1, op2, vl);
1556 }
1557
1558 //
1559 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
1560 // CHECK-RV64-NEXT: entry:
1561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1562 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1563 //
test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint16mf4_t op1,uint16_t op2,size_t vl)1564 vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
1565 vuint16mf4_t op1, uint16_t op2,
1566 size_t vl) {
1567 return vmsleu(mask, maskedoff, op1, op2, vl);
1568 }
1569
1570 //
1571 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
1572 // CHECK-RV64-NEXT: entry:
1573 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1574 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1575 //
test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask,vbool32_t maskedoff,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)1576 vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
1577 vuint16mf2_t op1, vuint16mf2_t op2,
1578 size_t vl) {
1579 return vmsleu(mask, maskedoff, op1, op2, vl);
1580 }
1581
1582 //
1583 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
1584 // CHECK-RV64-NEXT: entry:
1585 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1586 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1587 //
test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask,vbool32_t maskedoff,vuint16mf2_t op1,uint16_t op2,size_t vl)1588 vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
1589 vuint16mf2_t op1, uint16_t op2,
1590 size_t vl) {
1591 return vmsleu(mask, maskedoff, op1, op2, vl);
1592 }
1593
1594 //
1595 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
1596 // CHECK-RV64-NEXT: entry:
1597 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1598 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1599 //
test_vmsleu_vv_u16m1_b16_m(vbool16_t mask,vbool16_t maskedoff,vuint16m1_t op1,vuint16m1_t op2,size_t vl)1600 vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
1601 vuint16m1_t op1, vuint16m1_t op2,
1602 size_t vl) {
1603 return vmsleu(mask, maskedoff, op1, op2, vl);
1604 }
1605
1606 //
1607 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
1608 // CHECK-RV64-NEXT: entry:
1609 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1610 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1611 //
test_vmsleu_vx_u16m1_b16_m(vbool16_t mask,vbool16_t maskedoff,vuint16m1_t op1,uint16_t op2,size_t vl)1612 vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
1613 vuint16m1_t op1, uint16_t op2, size_t vl) {
1614 return vmsleu(mask, maskedoff, op1, op2, vl);
1615 }
1616
1617 //
1618 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
1619 // CHECK-RV64-NEXT: entry:
1620 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1621 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1622 //
test_vmsleu_vv_u16m2_b8_m(vbool8_t mask,vbool8_t maskedoff,vuint16m2_t op1,vuint16m2_t op2,size_t vl)1623 vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
1624 vuint16m2_t op1, vuint16m2_t op2,
1625 size_t vl) {
1626 return vmsleu(mask, maskedoff, op1, op2, vl);
1627 }
1628
1629 //
1630 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
1631 // CHECK-RV64-NEXT: entry:
1632 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1633 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1634 //
test_vmsleu_vx_u16m2_b8_m(vbool8_t mask,vbool8_t maskedoff,vuint16m2_t op1,uint16_t op2,size_t vl)1635 vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
1636 vuint16m2_t op1, uint16_t op2, size_t vl) {
1637 return vmsleu(mask, maskedoff, op1, op2, vl);
1638 }
1639
1640 //
1641 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
1642 // CHECK-RV64-NEXT: entry:
1643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1644 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1645 //
test_vmsleu_vv_u16m4_b4_m(vbool4_t mask,vbool4_t maskedoff,vuint16m4_t op1,vuint16m4_t op2,size_t vl)1646 vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
1647 vuint16m4_t op1, vuint16m4_t op2,
1648 size_t vl) {
1649 return vmsleu(mask, maskedoff, op1, op2, vl);
1650 }
1651
1652 //
1653 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
1654 // CHECK-RV64-NEXT: entry:
1655 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1656 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1657 //
test_vmsleu_vx_u16m4_b4_m(vbool4_t mask,vbool4_t maskedoff,vuint16m4_t op1,uint16_t op2,size_t vl)1658 vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
1659 vuint16m4_t op1, uint16_t op2, size_t vl) {
1660 return vmsleu(mask, maskedoff, op1, op2, vl);
1661 }
1662
1663 //
1664 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
1665 // CHECK-RV64-NEXT: entry:
1666 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1667 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1668 //
test_vmsleu_vv_u16m8_b2_m(vbool2_t mask,vbool2_t maskedoff,vuint16m8_t op1,vuint16m8_t op2,size_t vl)1669 vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
1670 vuint16m8_t op1, vuint16m8_t op2,
1671 size_t vl) {
1672 return vmsleu(mask, maskedoff, op1, op2, vl);
1673 }
1674
1675 //
1676 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
1677 // CHECK-RV64-NEXT: entry:
1678 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1679 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1680 //
test_vmsleu_vx_u16m8_b2_m(vbool2_t mask,vbool2_t maskedoff,vuint16m8_t op1,uint16_t op2,size_t vl)1681 vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
1682 vuint16m8_t op1, uint16_t op2, size_t vl) {
1683 return vmsleu(mask, maskedoff, op1, op2, vl);
1684 }
1685
1686 //
1687 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
1688 // CHECK-RV64-NEXT: entry:
1689 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1690 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1691 //
test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)1692 vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
1693 vuint32mf2_t op1, vuint32mf2_t op2,
1694 size_t vl) {
1695 return vmsleu(mask, maskedoff, op1, op2, vl);
1696 }
1697
1698 //
1699 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
1700 // CHECK-RV64-NEXT: entry:
1701 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1702 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1703 //
test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask,vbool64_t maskedoff,vuint32mf2_t op1,uint32_t op2,size_t vl)1704 vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
1705 vuint32mf2_t op1, uint32_t op2,
1706 size_t vl) {
1707 return vmsleu(mask, maskedoff, op1, op2, vl);
1708 }
1709
1710 //
1711 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
1712 // CHECK-RV64-NEXT: entry:
1713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1715 //
test_vmsleu_vv_u32m1_b32_m(vbool32_t mask,vbool32_t maskedoff,vuint32m1_t op1,vuint32m1_t op2,size_t vl)1716 vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
1717 vuint32m1_t op1, vuint32m1_t op2,
1718 size_t vl) {
1719 return vmsleu(mask, maskedoff, op1, op2, vl);
1720 }
1721
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u32m1: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                     vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1732
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u32m2: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                     vuint32m2_t op1, vuint32m2_t op2,
                                     size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1744
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u32m2: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                     vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1755
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u32m4: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vuint32m4_t op1, vuint32m4_t op2,
                                   size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1767
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u32m4: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1778
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u32m8: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                   vuint32m8_t op1, vuint32m8_t op2,
                                   size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1790
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u32m8: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                   vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1801
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u64m1: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vuint64m1_t op1, vuint64m1_t op2,
                                     size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1813
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u64m1: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1824
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u64m2: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                     vuint64m2_t op1, vuint64m2_t op2,
                                     size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1836
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u64m2: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                     vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1847
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u64m4: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                     vuint64m4_t op1, vuint64m4_t op2,
                                     size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1859
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u64m4: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                     vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1870
//
// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
// Masked vector-vector unsigned <= on u64m8: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vuint64m8_t op1, vuint64m8_t op2,
                                   size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1882
//
// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
// Masked vector-scalar unsigned <= on u64m8: must lower to the
// @llvm.riscv.vmsleu.mask intrinsic.
vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmsleu(mask, maskedoff, op1, op2, vl);
}
1893