// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
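
// This file tests the vfmin (vector single-width floating-point minimum)
// intrinsics, covering the vector-vector (vv) and vector-scalar (vf) forms
// for each f32 and f64 LMUL variant, in both unmasked and masked (_m)
// versions.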

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmin_vv_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}