1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
4 // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
5
6 #include <riscv_vector.h>
7
8 //
9 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
10 // CHECK-RV64-NEXT: entry:
11 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
13 //
// Unmasked vector-vector form; expected IR is pinned by the CHECK lines above.
vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
                                     vfloat32mf2_t op2, size_t vl) {
  return vfnmsac_vv_f32mf2(acc, op1, op2, vl);
}
18
19 //
20 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
21 // CHECK-RV64-NEXT: entry:
22 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
23 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
24 //
// Unmasked vector-scalar form; expected IR is pinned by the CHECK lines above.
vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
                                     vfloat32mf2_t op2, size_t vl) {
  return vfnmsac_vf_f32mf2(acc, op1, op2, vl);
}
29
30 //
31 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
35 //
// Unmasked vector-vector form; expected IR is pinned by the CHECK lines above.
vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
                                   vfloat32m1_t op2, size_t vl) {
  return vfnmsac_vv_f32m1(acc, op1, op2, vl);
}
40
41 //
42 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
43 // CHECK-RV64-NEXT: entry:
44 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
45 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
46 //
// Unmasked vector-scalar form; expected IR is pinned by the CHECK lines above.
vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
                                   vfloat32m1_t op2, size_t vl) {
  return vfnmsac_vf_f32m1(acc, op1, op2, vl);
}
51
52 //
53 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
54 // CHECK-RV64-NEXT: entry:
55 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
57 //
// Unmasked vector-vector form; expected IR is pinned by the CHECK lines above.
vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
                                   vfloat32m2_t op2, size_t vl) {
  return vfnmsac_vv_f32m2(acc, op1, op2, vl);
}
62
63 //
64 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
65 // CHECK-RV64-NEXT: entry:
66 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
68 //
// Unmasked vector-scalar form; expected IR is pinned by the CHECK lines above.
vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
                                   vfloat32m2_t op2, size_t vl) {
  return vfnmsac_vf_f32m2(acc, op1, op2, vl);
}
73
74 //
75 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
76 // CHECK-RV64-NEXT: entry:
77 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
78 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
79 //
// Unmasked vector-vector form; expected IR is pinned by the CHECK lines above.
vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
                                   vfloat32m4_t op2, size_t vl) {
  return vfnmsac_vv_f32m4(acc, op1, op2, vl);
}
84
85 //
86 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
87 // CHECK-RV64-NEXT: entry:
88 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
89 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
90 //
// Unmasked vector-scalar form; expected IR is pinned by the CHECK lines above.
vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
                                   vfloat32m4_t op2, size_t vl) {
  return vfnmsac_vf_f32m4(acc, op1, op2, vl);
}
95
96 //
97 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
101 //
// Unmasked vector-vector form; expected IR is pinned by the CHECK lines above.
vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
                                   vfloat32m8_t op2, size_t vl) {
  return vfnmsac_vv_f32m8(acc, op1, op2, vl);
}
106
107 //
108 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
112 //
// Unmasked vector-scalar form; expected IR is pinned by the CHECK lines above.
vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
                                   vfloat32m8_t op2, size_t vl) {
  return vfnmsac_vf_f32m8(acc, op1, op2, vl);
}
117
118 //
119 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
120 // CHECK-RV64-NEXT: entry:
121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
123 //
// Unmasked vector-vector form (f64); expected IR is pinned by the CHECK lines above.
vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
                                   vfloat64m1_t op2, size_t vl) {
  return vfnmsac_vv_f64m1(acc, op1, op2, vl);
}
128
129 //
130 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
131 // CHECK-RV64-NEXT: entry:
132 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
134 //
// Unmasked vector-scalar form (double scalar); IR pinned by the CHECK lines above.
vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
                                   vfloat64m1_t op2, size_t vl) {
  return vfnmsac_vf_f64m1(acc, op1, op2, vl);
}
139
140 //
141 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
142 // CHECK-RV64-NEXT: entry:
143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
144 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
145 //
// Unmasked vector-vector form (f64); expected IR is pinned by the CHECK lines above.
vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
                                   vfloat64m2_t op2, size_t vl) {
  return vfnmsac_vv_f64m2(acc, op1, op2, vl);
}
150
151 //
152 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
153 // CHECK-RV64-NEXT: entry:
154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
155 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
156 //
// Unmasked vector-scalar form (double scalar); IR pinned by the CHECK lines above.
vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
                                   vfloat64m2_t op2, size_t vl) {
  return vfnmsac_vf_f64m2(acc, op1, op2, vl);
}
161
162 //
163 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
164 // CHECK-RV64-NEXT: entry:
165 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
167 //
// Unmasked vector-vector form (f64); expected IR is pinned by the CHECK lines above.
vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
                                   vfloat64m4_t op2, size_t vl) {
  return vfnmsac_vv_f64m4(acc, op1, op2, vl);
}
172
173 //
174 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
175 // CHECK-RV64-NEXT: entry:
176 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
177 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
178 //
// Unmasked vector-scalar form (double scalar); IR pinned by the CHECK lines above.
vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
                                   vfloat64m4_t op2, size_t vl) {
  return vfnmsac_vf_f64m4(acc, op1, op2, vl);
}
183
184 //
185 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
186 // CHECK-RV64-NEXT: entry:
187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
189 //
// Unmasked vector-vector form (f64); expected IR is pinned by the CHECK lines above.
vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
                                   vfloat64m8_t op2, size_t vl) {
  return vfnmsac_vv_f64m8(acc, op1, op2, vl);
}
194
195 //
196 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
197 // CHECK-RV64-NEXT: entry:
198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
200 //
// Unmasked vector-scalar form (double scalar); IR pinned by the CHECK lines above.
vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
                                   vfloat64m8_t op2, size_t vl) {
  return vfnmsac_vf_f64m8(acc, op1, op2, vl);
}
205
206 //
207 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
208 // CHECK-RV64-NEXT: entry:
209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
211 //
// Masked vector-vector form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                       vfloat32mf2_t op1, vfloat32mf2_t op2,
                                       size_t vl) {
  return vfnmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
}
217
218 //
219 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
220 // CHECK-RV64-NEXT: entry:
221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
222 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
223 //
// Masked vector-scalar form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                       float op1, vfloat32mf2_t op2,
                                       size_t vl) {
  return vfnmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
}
229
230 //
231 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
232 // CHECK-RV64-NEXT: entry:
233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
234 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
235 //
// Masked vector-vector form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
                                     vfloat32m1_t op1, vfloat32m1_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
}
241
242 //
243 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
244 // CHECK-RV64-NEXT: entry:
245 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
246 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
247 //
// Masked vector-scalar form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
                                     float op1, vfloat32m1_t op2, size_t vl) {
  return vfnmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
}
252
253 //
254 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
255 // CHECK-RV64-NEXT: entry:
256 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
257 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
258 //
// Masked vector-vector form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
                                     vfloat32m2_t op1, vfloat32m2_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
}
264
265 //
266 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
267 // CHECK-RV64-NEXT: entry:
268 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
269 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
270 //
// Masked vector-scalar form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
                                     float op1, vfloat32m2_t op2, size_t vl) {
  return vfnmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
}
275
276 //
277 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
278 // CHECK-RV64-NEXT: entry:
279 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
280 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
281 //
// Masked vector-vector form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
                                     vfloat32m4_t op1, vfloat32m4_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
}
287
288 //
289 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
290 // CHECK-RV64-NEXT: entry:
291 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
292 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
293 //
// Masked vector-scalar form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
                                     vfloat32m4_t op2, size_t vl) {
  return vfnmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
}
298
299 //
300 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
301 // CHECK-RV64-NEXT: entry:
302 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
303 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
304 //
// Masked vector-vector form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
                                     vfloat32m8_t op1, vfloat32m8_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
}
310
311 //
312 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
313 // CHECK-RV64-NEXT: entry:
314 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
315 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
316 //
// Masked vector-scalar form; mask operand comes first. IR pinned by CHECK lines above.
vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
                                     vfloat32m8_t op2, size_t vl) {
  return vfnmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
}
321
322 //
323 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
324 // CHECK-RV64-NEXT: entry:
325 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
326 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
327 //
// Masked vector-vector form (f64); mask operand comes first. IR pinned by CHECK lines above.
vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                     vfloat64m1_t op1, vfloat64m1_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
}
333
334 //
335 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
336 // CHECK-RV64-NEXT: entry:
337 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
338 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
339 //
// Masked vector-scalar form (double scalar); mask operand comes first.
vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                     double op1, vfloat64m1_t op2, size_t vl) {
  return vfnmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
}
344
345 //
346 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
347 // CHECK-RV64-NEXT: entry:
348 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
349 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
350 //
// Masked vector-vector form (f64); mask operand comes first. IR pinned by CHECK lines above.
vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                     vfloat64m2_t op1, vfloat64m2_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
}
356
357 //
358 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
359 // CHECK-RV64-NEXT: entry:
360 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
361 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
362 //
// Masked vector-scalar form (double scalar); mask operand comes first.
vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                     double op1, vfloat64m2_t op2, size_t vl) {
  return vfnmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
}
367
368 //
369 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
370 // CHECK-RV64-NEXT: entry:
371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
372 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
373 //
// Masked vector-vector form (f64); mask operand comes first. IR pinned by CHECK lines above.
vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                     vfloat64m4_t op1, vfloat64m4_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
}
379
380 //
381 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
382 // CHECK-RV64-NEXT: entry:
383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
384 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
385 //
// Masked vector-scalar form (double scalar); mask operand comes first.
vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                     double op1, vfloat64m4_t op2, size_t vl) {
  return vfnmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
}
390
391 //
392 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
393 // CHECK-RV64-NEXT: entry:
394 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
395 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
396 //
// Masked vector-vector form (f64); mask operand comes first. IR pinned by CHECK lines above.
vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                     vfloat64m8_t op1, vfloat64m8_t op2,
                                     size_t vl) {
  return vfnmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
}
402
403 //
404 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
405 // CHECK-RV64-NEXT: entry:
406 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
407 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
408 //
// Masked vector-scalar form (double scalar); mask operand comes first.
vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                     double op1, vfloat64m8_t op2, size_t vl) {
  return vfnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
}
413