// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vmfle_vv_f32mf2_b64(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
  return vmfle_vf_f32mf2_b64(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
                                  size_t vl) {
  return vmfle_vv_f32m1_b32(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m1_b32(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
                                  size_t vl) {
  return vmfle_vv_f32m2_b16(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m2_b16(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vmfle_vv_f32m4_b8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m4_b8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vmfle_vv_f32m8_b4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m8_b4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
                                  size_t vl) {
  return vmfle_vv_f64m1_b64(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m1_b64(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
                                  size_t vl) {
  return vmfle_vv_f64m2_b32(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m2_b32(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
                                  size_t vl) {
  return vmfle_vv_f64m4_b16(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m4_b16(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vmfle_vv_f64m8_b8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m8_b8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vfloat32m1_t op1, vfloat32m1_t op2,
                                    size_t vl) {
  return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vfloat32m2_t op1, vfloat32m2_t op2,
                                    size_t vl) {
  return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vfloat32m4_t op1, vfloat32m4_t op2,
                                  size_t vl) {
  return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vfloat32m4_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                  vfloat32m8_t op1, vfloat32m8_t op2,
                                  size_t vl) {
  return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
                                  vfloat32m8_t op1, float op2, size_t vl) {
  return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                    vfloat64m1_t op1, vfloat64m1_t op2,
                                    size_t vl) {
  return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
                                    vfloat64m1_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vfloat64m2_t op1, vfloat64m2_t op2,
                                    size_t vl) {
  return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
                                    vfloat64m2_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vfloat64m4_t op1, vfloat64m4_t op2,
                                    size_t vl) {
  return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
                                    vfloat64m4_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vfloat64m8_t op1, vfloat64m8_t op2,
                                  size_t vl) {
  return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                  vfloat64m8_t op1, double op2, size_t vl) {
  return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
}