// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

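// The tests below cover the RVV narrowing conversions (vfncvt): float to
// signed integer (x) and to unsigned integer (xu), their round-towards-zero
// variants (rtz), wide integer to float (f_x / f_xu), and float to narrower
// float (f_f) with its round-towards-odd variant (rod). Each intrinsic
// narrows elements from 2*SEW to SEW, so the source vector uses twice the
// LMUL of the result (e.g. vfloat32mf2_t -> vint16mf4_t).
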
//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
  return vfncvt_x_f_w_i16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
  return vfncvt_x_f_w_i16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
  return vfncvt_x_f_w_i16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
  return vfncvt_x_f_w_i16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
  return vfncvt_x_f_w_i16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
  return vfncvt_xu_f_w_u16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
  return vfncvt_xu_f_w_u16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
  return vfncvt_xu_f_w_u16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
  return vfncvt_xu_f_w_u16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
  return vfncvt_xu_f_w_u16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_x_f_w_i32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_x_f_w_i32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_x_f_w_i32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_x_f_w_i32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_xu_f_w_u32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
  return vfncvt_f_x_w_f32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
  return vfncvt_f_x_w_f32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
  return vfncvt_f_x_w_f32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
  return vfncvt_f_x_w_f32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
  return vfncvt_f_xu_w_f32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_f_f_w_f32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_f_f_w_f32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_f_f_w_f32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_f_f_w_f32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m4(src, vl);
}

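// Masked variants: each *_m intrinsic takes a mask and a maskedoff vector in
// addition to the source; lanes where the mask is clear are taken from
// maskedoff (see the *.mask.* intrinsic calls in the checks below).
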
528 //
529 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
530 // CHECK-RV64-NEXT:  entry:
531 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
532 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
533 //
test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,vfloat32mf2_t src,size_t vl)534 vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
535                                        vfloat32mf2_t src, size_t vl) {
536   return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
537 }
538 
539 //
540 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
541 // CHECK-RV64-NEXT:  entry:
542 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
543 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
544 //
test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,vfloat32mf2_t src,size_t vl)545 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
546                                            vint16mf4_t maskedoff,
547                                            vfloat32mf2_t src, size_t vl) {
548   return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
549 }
550 
551 //
552 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
553 // CHECK-RV64-NEXT:  entry:
554 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
555 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
556 //
test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask,vint16mf2_t maskedoff,vfloat32m1_t src,size_t vl)557 vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
558                                        vfloat32m1_t src, size_t vl) {
559   return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
560 }
561 
562 //
563 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
564 // CHECK-RV64-NEXT:  entry:
565 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
566 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
567 //
test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,vint16mf2_t maskedoff,vfloat32m1_t src,size_t vl)568 vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
569                                            vint16mf2_t maskedoff,
570                                            vfloat32m1_t src, size_t vl) {
571   return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
572 }
573 
574 //
575 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
576 // CHECK-RV64-NEXT:  entry:
577 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
578 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
579 //
test_vfncvt_x_f_w_i16m1_m(vbool16_t mask,vint16m1_t maskedoff,vfloat32m2_t src,size_t vl)580 vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
581                                      vfloat32m2_t src, size_t vl) {
582   return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl);
583 }
584 
585 //
586 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
587 // CHECK-RV64-NEXT:  entry:
588 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
589 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
590 //
test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask,vint16m1_t maskedoff,vfloat32m2_t src,size_t vl)591 vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
592                                          vfloat32m2_t src, size_t vl) {
593   return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl);
594 }
595 
596 //
597 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
598 // CHECK-RV64-NEXT:  entry:
599 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
600 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
601 //
test_vfncvt_x_f_w_i16m2_m(vbool8_t mask,vint16m2_t maskedoff,vfloat32m4_t src,size_t vl)602 vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
603                                      vfloat32m4_t src, size_t vl) {
604   return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl);
605 }
606 
607 //
608 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
609 // CHECK-RV64-NEXT:  entry:
610 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
611 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
612 //
test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask,vint16m2_t maskedoff,vfloat32m4_t src,size_t vl)613 vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
614                                          vfloat32m4_t src, size_t vl) {
615   return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl);
616 }
617 
618 //
619 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
620 // CHECK-RV64-NEXT:  entry:
621 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
622 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
623 //
test_vfncvt_x_f_w_i16m4_m(vbool4_t mask,vint16m4_t maskedoff,vfloat32m8_t src,size_t vl)624 vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
625                                      vfloat32m8_t src, size_t vl) {
626   return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl);
627 }
628 
629 //
630 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
631 // CHECK-RV64-NEXT:  entry:
632 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
633 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
634 //
test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask,vint16m4_t maskedoff,vfloat32m8_t src,size_t vl)635 vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
636                                          vfloat32m8_t src, size_t vl) {
637   return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl);
638 }
639 
640 //
641 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
642 // CHECK-RV64-NEXT:  entry:
643 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
644 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
645 //
test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask,vuint16mf4_t maskedoff,vfloat32mf2_t src,size_t vl)646 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
647                                          vfloat32mf2_t src, size_t vl) {
648   return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
649 }
650 
651 //
652 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
653 // CHECK-RV64-NEXT:  entry:
654 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
655 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
656 //
test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,vuint16mf4_t maskedoff,vfloat32mf2_t src,size_t vl)657 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
658                                              vuint16mf4_t maskedoff,
659                                              vfloat32mf2_t src, size_t vl) {
660   return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
661 }
662 
663 //
664 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
665 // CHECK-RV64-NEXT:  entry:
666 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
667 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
668 //
test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask,vuint16mf2_t maskedoff,vfloat32m1_t src,size_t vl)669 vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
670                                          vfloat32m1_t src, size_t vl) {
671   return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
672 }
673 
674 //
675 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
676 // CHECK-RV64-NEXT:  entry:
677 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
678 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
679 //
test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,vuint16mf2_t maskedoff,vfloat32m1_t src,size_t vl)680 vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
681                                              vuint16mf2_t maskedoff,
682                                              vfloat32m1_t src, size_t vl) {
683   return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
684 }
685 
686 //
687 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
688 // CHECK-RV64-NEXT:  entry:
689 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
690 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
691 //
test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask,vuint16m1_t maskedoff,vfloat32m2_t src,size_t vl)692 vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
693                                        vfloat32m2_t src, size_t vl) {
694   return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
695 }
696 
697 //
698 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
699 // CHECK-RV64-NEXT:  entry:
700 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
701 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
702 //
test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,vuint16m1_t maskedoff,vfloat32m2_t src,size_t vl)703 vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
704                                            vuint16m1_t maskedoff,
705                                            vfloat32m2_t src, size_t vl) {
706   return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
707 }
708 
709 //
710 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
711 // CHECK-RV64-NEXT:  entry:
712 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
713 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
714 //
test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask,vuint16m2_t maskedoff,vfloat32m4_t src,size_t vl)715 vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
716                                        vfloat32m4_t src, size_t vl) {
717   return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
718 }
719 
720 //
721 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
722 // CHECK-RV64-NEXT:  entry:
723 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
724 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
725 //
test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask,vuint16m2_t maskedoff,vfloat32m4_t src,size_t vl)726 vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
727                                            vfloat32m4_t src, size_t vl) {
728   return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
729 }
730 
731 //
732 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
733 // CHECK-RV64-NEXT:  entry:
734 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
735 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
736 //
test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask,vuint16m4_t maskedoff,vfloat32m8_t src,size_t vl)737 vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
738                                        vfloat32m8_t src, size_t vl) {
739   return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
740 }
741 
742 //
743 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
744 // CHECK-RV64-NEXT:  entry:
745 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
746 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
747 //
test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask,vuint16m4_t maskedoff,vfloat32m8_t src,size_t vl)748 vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
749                                            vfloat32m8_t src, size_t vl) {
750   return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
751 }
752 
753 //
754 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
755 // CHECK-RV64-NEXT:  entry:
756 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
757 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
758 //
test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask,vint32mf2_t maskedoff,vfloat64m1_t src,size_t vl)759 vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
760                                        vfloat64m1_t src, size_t vl) {
761   return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
762 }
763 
764 //
765 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
766 // CHECK-RV64-NEXT:  entry:
767 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
768 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
769 //
test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,vint32mf2_t maskedoff,vfloat64m1_t src,size_t vl)770 vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
771                                            vint32mf2_t maskedoff,
772                                            vfloat64m1_t src, size_t vl) {
773   return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
774 }
775 
776 //
777 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
778 // CHECK-RV64-NEXT:  entry:
779 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
780 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
781 //
test_vfncvt_x_f_w_i32m1_m(vbool32_t mask,vint32m1_t maskedoff,vfloat64m2_t src,size_t vl)782 vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
783                                      vfloat64m2_t src, size_t vl) {
784   return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl);
785 }
786 
787 //
788 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
789 // CHECK-RV64-NEXT:  entry:
790 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
791 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
792 //
test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask,vint32m1_t maskedoff,vfloat64m2_t src,size_t vl)793 vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
794                                          vfloat64m2_t src, size_t vl) {
795   return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl);
796 }
797 
798 //
799 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
800 // CHECK-RV64-NEXT:  entry:
801 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
802 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
803 //
test_vfncvt_x_f_w_i32m2_m(vbool16_t mask,vint32m2_t maskedoff,vfloat64m4_t src,size_t vl)804 vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
805                                      vfloat64m4_t src, size_t vl) {
806   return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl);
807 }
808 
809 //
810 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
811 // CHECK-RV64-NEXT:  entry:
812 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
813 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
814 //
test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask,vint32m2_t maskedoff,vfloat64m4_t src,size_t vl)815 vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
816                                          vfloat64m4_t src, size_t vl) {
817   return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl);
818 }
819 
820 //
821 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
822 // CHECK-RV64-NEXT:  entry:
823 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
824 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
825 //
test_vfncvt_x_f_w_i32m4_m(vbool8_t mask,vint32m4_t maskedoff,vfloat64m8_t src,size_t vl)826 vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
827                                      vfloat64m8_t src, size_t vl) {
828   return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl);
829 }
830 
831 //
832 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
833 // CHECK-RV64-NEXT:  entry:
834 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
835 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
836 //
test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask,vint32m4_t maskedoff,vfloat64m8_t src,size_t vl)837 vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
838                                          vfloat64m8_t src, size_t vl) {
839   return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl);
840 }
841 
842 //
843 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
844 // CHECK-RV64-NEXT:  entry:
845 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
846 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
847 //
test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask,vuint32mf2_t maskedoff,vfloat64m1_t src,size_t vl)848 vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
849                                          vfloat64m1_t src, size_t vl) {
850   return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
851 }
852 
853 //
854 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
855 // CHECK-RV64-NEXT:  entry:
856 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
857 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
858 //
test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,vuint32mf2_t maskedoff,vfloat64m1_t src,size_t vl)859 vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
860                                              vuint32mf2_t maskedoff,
861                                              vfloat64m1_t src, size_t vl) {
862   return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
863 }
864 
865 //
866 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
867 // CHECK-RV64-NEXT:  entry:
868 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
869 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
870 //
test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask,vuint32m1_t maskedoff,vfloat64m2_t src,size_t vl)871 vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
872                                        vfloat64m2_t src, size_t vl) {
873   return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
874 }
875 
876 //
877 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
878 // CHECK-RV64-NEXT:  entry:
879 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
880 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
881 //
test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,vuint32m1_t maskedoff,vfloat64m2_t src,size_t vl)882 vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
883                                            vuint32m1_t maskedoff,
884                                            vfloat64m2_t src, size_t vl) {
885   return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
886 }
887 
888 //
889 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
890 // CHECK-RV64-NEXT:  entry:
891 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
892 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
893 //
test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask,vuint32m2_t maskedoff,vfloat64m4_t src,size_t vl)894 vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
895                                        vfloat64m4_t src, size_t vl) {
896   return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
897 }
898 
899 //
900 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
901 // CHECK-RV64-NEXT:  entry:
902 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
903 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
904 //
test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,vuint32m2_t maskedoff,vfloat64m4_t src,size_t vl)905 vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
906                                            vuint32m2_t maskedoff,
907                                            vfloat64m4_t src, size_t vl) {
908   return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
909 }
910 
911 //
912 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
913 // CHECK-RV64-NEXT:  entry:
914 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
915 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
916 //
test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vfloat64m8_t src,size_t vl)917 vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
918                                        vfloat64m8_t src, size_t vl) {
919   return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
920 }
921 
922 //
923 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
924 // CHECK-RV64-NEXT:  entry:
925 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
926 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
927 //
test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vfloat64m8_t src,size_t vl)928 vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
929                                            vfloat64m8_t src, size_t vl) {
930   return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
931 }
932 
933 //
934 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
935 // CHECK-RV64-NEXT:  entry:
936 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
937 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
938 //
test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,vfloat32mf2_t maskedoff,vint64m1_t src,size_t vl)939 vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
940                                          vfloat32mf2_t maskedoff,
941                                          vint64m1_t src, size_t vl) {
942   return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl);
943 }
944 
945 //
946 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
947 // CHECK-RV64-NEXT:  entry:
948 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
949 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
950 //
test_vfncvt_f_x_w_f32m1_m(vbool32_t mask,vfloat32m1_t maskedoff,vint64m2_t src,size_t vl)951 vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
952                                        vint64m2_t src, size_t vl) {
953   return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl);
954 }
955 
956 //
957 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
958 // CHECK-RV64-NEXT:  entry:
959 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
960 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
961 //
test_vfncvt_f_x_w_f32m2_m(vbool16_t mask,vfloat32m2_t maskedoff,vint64m4_t src,size_t vl)962 vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
963                                        vint64m4_t src, size_t vl) {
964   return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl);
965 }
966 
967 //
968 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
969 // CHECK-RV64-NEXT:  entry:
970 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
971 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
972 //
test_vfncvt_f_x_w_f32m4_m(vbool8_t mask,vfloat32m4_t maskedoff,vint64m8_t src,size_t vl)973 vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
974                                        vint64m8_t src, size_t vl) {
975   return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl);
976 }
977 
//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
                                          vfloat32mf2_t maskedoff,
                                          vuint64m1_t src, size_t vl) {
  return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                        vuint64m2_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                        vuint64m4_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                        vuint64m8_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl);
}

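// Masked (_m) variants of the double-to-float narrowing conversion
// (vfncvt.f.f.w) and its round-toward-odd form (vfncvt.rod.f.f.w).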
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
                                         vfloat32mf2_t maskedoff,
                                         vfloat64m1_t src, size_t vl) {
  return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
                                             vfloat32mf2_t maskedoff,
                                             vfloat64m1_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                       vfloat64m2_t src, size_t vl) {
  return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
                                           vfloat32m1_t maskedoff,
                                           vfloat64m2_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                       vfloat64m4_t src, size_t vl) {
  return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
                                           vfloat32m2_t maskedoff,
                                           vfloat64m4_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                       vfloat64m8_t src, size_t vl) {
  return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
                                           vfloat32m4_t maskedoff,
                                           vfloat64m8_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl);
}