// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
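
// vfwsub is the RVV widening floating-point subtract: 32-bit float inputs
// yield a 64-bit double result. The intrinsic suffix encodes the operand
// forms exercised below:
//   vv: narrow vector - narrow vector
//   vf: narrow vector - scalar float
//   wv: wide vector - narrow vector (the vfwsub.w form)
//   wf: wide vector - scalar float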

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                  size_t vl) {
  return vfwsub_vv_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
                                  size_t vl) {
  return vfwsub_wv_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
                                  size_t vl) {
  return vfwsub_vv_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
                                  size_t vl) {
  return vfwsub_wv_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
                                  size_t vl) {
  return vfwsub_vv_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
                                  size_t vl) {
  return vfwsub_wv_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
                                  size_t vl) {
  return vfwsub_vv_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
                                  size_t vl) {
  return vfwsub_wv_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m8(op1, op2, vl);
}

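// Masked (_m) variants: each takes a mask and a maskedoff (inactive-element)
// operand and lowers to the corresponding @llvm.riscv.vfwsub.mask or
// @llvm.riscv.vfwsub.w.mask intrinsic.
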
//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat32mf2_t op1, vfloat32mf2_t op2,
                                    size_t vl) {
  return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat32mf2_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, vfloat32mf2_t op2,
                                    size_t vl) {
  return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat32m1_t op1, vfloat32m1_t op2,
                                    size_t vl) {
  return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, vfloat32m1_t op2,
                                    size_t vl) {
  return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat32m2_t op1, vfloat32m2_t op2,
                                    size_t vl) {
  return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, vfloat32m2_t op2,
                                    size_t vl) {
  return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat32m4_t op1, vfloat32m4_t op2,
                                    size_t vl) {
  return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat32m4_t op1, float op2, size_t vl) {
  return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, vfloat32m4_t op2,
                                    size_t vl) {
  return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, float op2, size_t vl) {
  return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
}