1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
4 // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
5
6 #include <riscv_vector.h>
7
8 //
9 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
10 // CHECK-RV64-NEXT: entry:
11 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
13 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f32mf2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
                                    vfloat32mf2_t op2, size_t vl) {
  return vfmsub_vv_f32mf2(acc, op1, op2, vl);
}
18
19 //
20 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
21 // CHECK-RV64-NEXT: entry:
22 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
23 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
24 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f32mf2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
                                    vfloat32mf2_t op2, size_t vl) {
  return vfmsub_vf_f32mf2(acc, op1, op2, vl);
}
29
30 //
31 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
35 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f32m1 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
                                  vfloat32m1_t op2, size_t vl) {
  return vfmsub_vv_f32m1(acc, op1, op2, vl);
}
40
41 //
42 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
43 // CHECK-RV64-NEXT: entry:
44 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
45 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
46 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f32m1 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
                                  size_t vl) {
  return vfmsub_vf_f32m1(acc, op1, op2, vl);
}
51
52 //
53 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
54 // CHECK-RV64-NEXT: entry:
55 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
57 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f32m2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
                                  vfloat32m2_t op2, size_t vl) {
  return vfmsub_vv_f32m2(acc, op1, op2, vl);
}
62
63 //
64 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
65 // CHECK-RV64-NEXT: entry:
66 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
68 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f32m2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
                                  size_t vl) {
  return vfmsub_vf_f32m2(acc, op1, op2, vl);
}
73
74 //
75 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
76 // CHECK-RV64-NEXT: entry:
77 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
78 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
79 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f32m4 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
                                  vfloat32m4_t op2, size_t vl) {
  return vfmsub_vv_f32m4(acc, op1, op2, vl);
}
84
85 //
86 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
87 // CHECK-RV64-NEXT: entry:
88 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
89 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
90 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f32m4 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
                                  size_t vl) {
  return vfmsub_vf_f32m4(acc, op1, op2, vl);
}
95
96 //
97 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
101 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f32m8 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
                                  vfloat32m8_t op2, size_t vl) {
  return vfmsub_vv_f32m8(acc, op1, op2, vl);
}
106
107 //
108 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
112 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f32m8 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
                                  size_t vl) {
  return vfmsub_vf_f32m8(acc, op1, op2, vl);
}
117
118 //
119 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
120 // CHECK-RV64-NEXT: entry:
121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
123 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f64m1 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
                                  vfloat64m1_t op2, size_t vl) {
  return vfmsub_vv_f64m1(acc, op1, op2, vl);
}
128
129 //
130 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
131 // CHECK-RV64-NEXT: entry:
132 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
134 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f64m1 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
                                  vfloat64m1_t op2, size_t vl) {
  return vfmsub_vf_f64m1(acc, op1, op2, vl);
}
139
140 //
141 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
142 // CHECK-RV64-NEXT: entry:
143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
144 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
145 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f64m2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
                                  vfloat64m2_t op2, size_t vl) {
  return vfmsub_vv_f64m2(acc, op1, op2, vl);
}
150
151 //
152 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
153 // CHECK-RV64-NEXT: entry:
154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
155 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
156 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f64m2 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
                                  vfloat64m2_t op2, size_t vl) {
  return vfmsub_vf_f64m2(acc, op1, op2, vl);
}
161
162 //
163 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
164 // CHECK-RV64-NEXT: entry:
165 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
167 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f64m4 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
                                  vfloat64m4_t op2, size_t vl) {
  return vfmsub_vv_f64m4(acc, op1, op2, vl);
}
172
173 //
174 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
175 // CHECK-RV64-NEXT: entry:
176 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
177 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
178 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f64m4 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
                                  vfloat64m4_t op2, size_t vl) {
  return vfmsub_vf_f64m4(acc, op1, op2, vl);
}
183
184 //
185 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
186 // CHECK-RV64-NEXT: entry:
187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
189 //
// Codegen test for the unmasked vector-vector vfmsub_vv_f64m8 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
                                  vfloat64m8_t op2, size_t vl) {
  return vfmsub_vv_f64m8(acc, op1, op2, vl);
}
194
195 //
196 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
197 // CHECK-RV64-NEXT: entry:
198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
200 //
// Codegen test for the unmasked vector-scalar vfmsub_vf_f64m8 intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
                                  vfloat64m8_t op2, size_t vl) {
  return vfmsub_vf_f64m8(acc, op1, op2, vl);
}
205
206 //
207 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
208 // CHECK-RV64-NEXT: entry:
209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
211 //
// Codegen test for the masked vector-vector vfmsub_vv_f32mf2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                      vfloat32mf2_t op1, vfloat32mf2_t op2,
                                      size_t vl) {
  return vfmsub_vv_f32mf2_m(mask, acc, op1, op2, vl);
}
217
218 //
219 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
220 // CHECK-RV64-NEXT: entry:
221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
222 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
223 //
// Codegen test for the masked vector-scalar vfmsub_vf_f32mf2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                      float op1, vfloat32mf2_t op2, size_t vl) {
  return vfmsub_vf_f32mf2_m(mask, acc, op1, op2, vl);
}
228
229 //
230 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
231 // CHECK-RV64-NEXT: entry:
232 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
233 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
234 //
// Codegen test for the masked vector-vector vfmsub_vv_f32m1_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
                                    vfloat32m1_t op1, vfloat32m1_t op2,
                                    size_t vl) {
  return vfmsub_vv_f32m1_m(mask, acc, op1, op2, vl);
}
240
241 //
242 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
243 // CHECK-RV64-NEXT: entry:
244 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
245 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
246 //
// Codegen test for the masked vector-scalar vfmsub_vf_f32m1_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
                                    vfloat32m1_t op2, size_t vl) {
  return vfmsub_vf_f32m1_m(mask, acc, op1, op2, vl);
}
251
252 //
253 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
254 // CHECK-RV64-NEXT: entry:
255 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
256 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
257 //
// Codegen test for the masked vector-vector vfmsub_vv_f32m2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
                                    vfloat32m2_t op1, vfloat32m2_t op2,
                                    size_t vl) {
  return vfmsub_vv_f32m2_m(mask, acc, op1, op2, vl);
}
263
264 //
265 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
266 // CHECK-RV64-NEXT: entry:
267 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
268 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
269 //
// Codegen test for the masked vector-scalar vfmsub_vf_f32m2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
                                    vfloat32m2_t op2, size_t vl) {
  return vfmsub_vf_f32m2_m(mask, acc, op1, op2, vl);
}
274
275 //
276 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
277 // CHECK-RV64-NEXT: entry:
278 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
279 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
280 //
// Codegen test for the masked vector-vector vfmsub_vv_f32m4_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
                                    vfloat32m4_t op1, vfloat32m4_t op2,
                                    size_t vl) {
  return vfmsub_vv_f32m4_m(mask, acc, op1, op2, vl);
}
286
287 //
288 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
289 // CHECK-RV64-NEXT: entry:
290 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
291 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
292 //
// Codegen test for the masked vector-scalar vfmsub_vf_f32m4_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
                                    vfloat32m4_t op2, size_t vl) {
  return vfmsub_vf_f32m4_m(mask, acc, op1, op2, vl);
}
297
298 //
299 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
300 // CHECK-RV64-NEXT: entry:
301 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
302 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
303 //
// Codegen test for the masked vector-vector vfmsub_vv_f32m8_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
                                    vfloat32m8_t op1, vfloat32m8_t op2,
                                    size_t vl) {
  return vfmsub_vv_f32m8_m(mask, acc, op1, op2, vl);
}
309
310 //
311 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
312 // CHECK-RV64-NEXT: entry:
313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
314 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
315 //
// Codegen test for the masked vector-scalar vfmsub_vf_f32m8_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
                                    vfloat32m8_t op2, size_t vl) {
  return vfmsub_vf_f32m8_m(mask, acc, op1, op2, vl);
}
320
321 //
322 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
323 // CHECK-RV64-NEXT: entry:
324 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
325 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
326 //
// Codegen test for the masked vector-vector vfmsub_vv_f64m1_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                    vfloat64m1_t op1, vfloat64m1_t op2,
                                    size_t vl) {
  return vfmsub_vv_f64m1_m(mask, acc, op1, op2, vl);
}
332
333 //
334 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
335 // CHECK-RV64-NEXT: entry:
336 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
337 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
338 //
// Codegen test for the masked vector-scalar vfmsub_vf_f64m1_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                    double op1, vfloat64m1_t op2, size_t vl) {
  return vfmsub_vf_f64m1_m(mask, acc, op1, op2, vl);
}
343
344 //
345 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
346 // CHECK-RV64-NEXT: entry:
347 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
348 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
349 //
// Codegen test for the masked vector-vector vfmsub_vv_f64m2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                    vfloat64m2_t op1, vfloat64m2_t op2,
                                    size_t vl) {
  return vfmsub_vv_f64m2_m(mask, acc, op1, op2, vl);
}
355
356 //
357 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
358 // CHECK-RV64-NEXT: entry:
359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
360 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
361 //
// Codegen test for the masked vector-scalar vfmsub_vf_f64m2_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                    double op1, vfloat64m2_t op2, size_t vl) {
  return vfmsub_vf_f64m2_m(mask, acc, op1, op2, vl);
}
366
367 //
368 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
369 // CHECK-RV64-NEXT: entry:
370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
371 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
372 //
// Codegen test for the masked vector-vector vfmsub_vv_f64m4_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                    vfloat64m4_t op1, vfloat64m4_t op2,
                                    size_t vl) {
  return vfmsub_vv_f64m4_m(mask, acc, op1, op2, vl);
}
378
379 //
380 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
381 // CHECK-RV64-NEXT: entry:
382 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
383 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
384 //
// Codegen test for the masked vector-scalar vfmsub_vf_f64m4_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                    double op1, vfloat64m4_t op2, size_t vl) {
  return vfmsub_vf_f64m4_m(mask, acc, op1, op2, vl);
}
389
390 //
391 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
392 // CHECK-RV64-NEXT: entry:
393 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
394 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
395 //
// Codegen test for the masked vector-vector vfmsub_vv_f64m8_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                    vfloat64m8_t op1, vfloat64m8_t op2,
                                    size_t vl) {
  return vfmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
}
401
402 //
403 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
404 // CHECK-RV64-NEXT: entry:
405 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
406 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
407 //
// Codegen test for the masked vector-scalar vfmsub_vf_f64m8_m intrinsic;
// the expected IR is pinned by the autogenerated CHECK-RV64 lines above.
vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                    vfloat64m8_t op2, size_t vl) {
  return vfmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
}
412