1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
4 // RUN: -target-feature +experimental-v -target-feature +experimental-zfh \
5 // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
6
7 #include <riscv_vector.h>
8
9 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
10 // CHECK-RV64-NEXT: entry:
11 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
13 //
vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
                                    vfloat32mf2_t op2, size_t vl) {
  return vfmsac_vv_f32mf2(acc, op1, op2, vl);
}
18
19 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
20 // CHECK-RV64-NEXT: entry:
21 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
23 //
vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
                                    vfloat32mf2_t op2, size_t vl) {
  return vfmsac_vf_f32mf2(acc, op1, op2, vl);
}
28
29 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
30 // CHECK-RV64-NEXT: entry:
31 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
32 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
33 //
vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
                                  vfloat32m1_t op2, size_t vl) {
  return vfmsac_vv_f32m1(acc, op1, op2, vl);
}
38
39 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
40 // CHECK-RV64-NEXT: entry:
41 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
42 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
43 //
vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
                                  size_t vl) {
  return vfmsac_vf_f32m1(acc, op1, op2, vl);
}
48
49 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
50 // CHECK-RV64-NEXT: entry:
51 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
52 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
53 //
vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
                                  vfloat32m2_t op2, size_t vl) {
  return vfmsac_vv_f32m2(acc, op1, op2, vl);
}
58
59 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
60 // CHECK-RV64-NEXT: entry:
61 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
62 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
63 //
vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
                                  size_t vl) {
  return vfmsac_vf_f32m2(acc, op1, op2, vl);
}
68
69 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
70 // CHECK-RV64-NEXT: entry:
71 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
72 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
73 //
vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
                                  vfloat32m4_t op2, size_t vl) {
  return vfmsac_vv_f32m4(acc, op1, op2, vl);
}
78
79 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
80 // CHECK-RV64-NEXT: entry:
81 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
82 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
83 //
vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
                                  size_t vl) {
  return vfmsac_vf_f32m4(acc, op1, op2, vl);
}
88
89 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
90 // CHECK-RV64-NEXT: entry:
91 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
92 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
93 //
vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
                                  vfloat32m8_t op2, size_t vl) {
  return vfmsac_vv_f32m8(acc, op1, op2, vl);
}
98
99 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
100 // CHECK-RV64-NEXT: entry:
101 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
102 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
103 //
vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
                                  size_t vl) {
  return vfmsac_vf_f32m8(acc, op1, op2, vl);
}
108
109 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
110 // CHECK-RV64-NEXT: entry:
111 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
112 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
113 //
vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
                                  vfloat64m1_t op2, size_t vl) {
  return vfmsac_vv_f64m1(acc, op1, op2, vl);
}
118
119 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
120 // CHECK-RV64-NEXT: entry:
121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
123 //
vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
                                  vfloat64m1_t op2, size_t vl) {
  return vfmsac_vf_f64m1(acc, op1, op2, vl);
}
128
129 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
130 // CHECK-RV64-NEXT: entry:
131 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
132 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
133 //
vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
                                  vfloat64m2_t op2, size_t vl) {
  return vfmsac_vv_f64m2(acc, op1, op2, vl);
}
138
139 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
140 // CHECK-RV64-NEXT: entry:
141 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
142 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
143 //
vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
                                  vfloat64m2_t op2, size_t vl) {
  return vfmsac_vf_f64m2(acc, op1, op2, vl);
}
148
149 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
150 // CHECK-RV64-NEXT: entry:
151 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
152 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
153 //
vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
                                  vfloat64m4_t op2, size_t vl) {
  return vfmsac_vv_f64m4(acc, op1, op2, vl);
}
158
159 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
160 // CHECK-RV64-NEXT: entry:
161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
162 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
163 //
vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
                                  vfloat64m4_t op2, size_t vl) {
  return vfmsac_vf_f64m4(acc, op1, op2, vl);
}
168
169 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
170 // CHECK-RV64-NEXT: entry:
171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
172 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
173 //
vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
                                  vfloat64m8_t op2, size_t vl) {
  return vfmsac_vv_f64m8(acc, op1, op2, vl);
}
178
179 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
180 // CHECK-RV64-NEXT: entry:
181 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
182 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
183 //
vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
                                  vfloat64m8_t op2, size_t vl) {
  return vfmsac_vf_f64m8(acc, op1, op2, vl);
}
188
189 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
190 // CHECK-RV64-NEXT: entry:
191 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
192 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
193 //
vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                      vfloat32mf2_t op1, vfloat32mf2_t op2,
                                      size_t vl) {
  return vfmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
}
199
200 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
201 // CHECK-RV64-NEXT: entry:
202 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
203 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
204 //
vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
                                      float op1, vfloat32mf2_t op2, size_t vl) {
  return vfmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
}
209
210 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
211 // CHECK-RV64-NEXT: entry:
212 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
213 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
214 //
vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
                                    vfloat32m1_t op1, vfloat32m1_t op2,
                                    size_t vl) {
  return vfmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
}
220
221 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
222 // CHECK-RV64-NEXT: entry:
223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
224 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
225 //
vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
                                    vfloat32m1_t op2, size_t vl) {
  return vfmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
}
230
231 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
232 // CHECK-RV64-NEXT: entry:
233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
234 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
235 //
vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
                                    vfloat32m2_t op1, vfloat32m2_t op2,
                                    size_t vl) {
  return vfmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
}
241
242 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
243 // CHECK-RV64-NEXT: entry:
244 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
245 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
246 //
vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
                                    vfloat32m2_t op2, size_t vl) {
  return vfmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
}
251
252 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
253 // CHECK-RV64-NEXT: entry:
254 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
255 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
256 //
vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
                                    vfloat32m4_t op1, vfloat32m4_t op2,
                                    size_t vl) {
  return vfmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
}
262
263 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
264 // CHECK-RV64-NEXT: entry:
265 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
266 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
267 //
vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
                                    vfloat32m4_t op2, size_t vl) {
  return vfmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
}
272
273 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
274 // CHECK-RV64-NEXT: entry:
275 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
276 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
277 //
vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
                                    vfloat32m8_t op1, vfloat32m8_t op2,
                                    size_t vl) {
  return vfmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
}
283
284 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
285 // CHECK-RV64-NEXT: entry:
286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
287 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
288 //
vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
                                    vfloat32m8_t op2, size_t vl) {
  return vfmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
}
293
294 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
295 // CHECK-RV64-NEXT: entry:
296 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
297 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
298 //
vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                    vfloat64m1_t op1, vfloat64m1_t op2,
                                    size_t vl) {
  return vfmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
}
304
305 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
306 // CHECK-RV64-NEXT: entry:
307 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
308 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
309 //
vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
                                    double op1, vfloat64m1_t op2, size_t vl) {
  return vfmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
}
314
315 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
316 // CHECK-RV64-NEXT: entry:
317 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
318 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
319 //
vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                    vfloat64m2_t op1, vfloat64m2_t op2,
                                    size_t vl) {
  return vfmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
}
325
326 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
327 // CHECK-RV64-NEXT: entry:
328 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
329 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
330 //
vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
                                    double op1, vfloat64m2_t op2, size_t vl) {
  return vfmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
}
335
336 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
337 // CHECK-RV64-NEXT: entry:
338 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
339 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
340 //
vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                    vfloat64m4_t op1, vfloat64m4_t op2,
                                    size_t vl) {
  return vfmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
}
346
347 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
348 // CHECK-RV64-NEXT: entry:
349 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
350 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
351 //
vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
                                    double op1, vfloat64m4_t op2, size_t vl) {
  return vfmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
}
356
357 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
358 // CHECK-RV64-NEXT: entry:
359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
360 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
361 //
vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                    vfloat64m8_t op1, vfloat64m8_t op2,
                                    size_t vl) {
  return vfmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
}
367
368 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
369 // CHECK-RV64-NEXT: entry:
370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
371 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
372 //
vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                    vfloat64m8_t op2, size_t vl) {
  return vfmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
}
377
378 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4(
379 // CHECK-RV64-NEXT: entry:
380 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
381 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
382 //
vfloat16mf4_t test_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
  return vfmsac_vv_f16mf4(vd, vs1, vs2, vl);
}
386
387 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4(
388 // CHECK-RV64-NEXT: entry:
389 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
390 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
391 //
vfloat16mf4_t test_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
  return vfmsac_vf_f16mf4(vd, rs1, vs2, vl);
}
395
396 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2(
397 // CHECK-RV64-NEXT: entry:
398 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
399 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
400 //
vfloat16mf2_t test_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
  return vfmsac_vv_f16mf2(vd, vs1, vs2, vl);
}
404
405 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2(
406 // CHECK-RV64-NEXT: entry:
407 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
408 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
409 //
vfloat16mf2_t test_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
  return vfmsac_vf_f16mf2(vd, rs1, vs2, vl);
}
413
414 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1(
415 // CHECK-RV64-NEXT: entry:
416 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
417 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
418 //
vfloat16m1_t test_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
  return vfmsac_vv_f16m1(vd, vs1, vs2, vl);
}
422
423 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1(
424 // CHECK-RV64-NEXT: entry:
425 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
426 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
427 //
vfloat16m1_t test_vfmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
  return vfmsac_vf_f16m1(vd, rs1, vs2, vl);
}
431
432 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2(
433 // CHECK-RV64-NEXT: entry:
434 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
435 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
436 //
vfloat16m2_t test_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
  return vfmsac_vv_f16m2(vd, vs1, vs2, vl);
}
440
441 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2(
442 // CHECK-RV64-NEXT: entry:
443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
444 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
445 //
vfloat16m2_t test_vfmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
  return vfmsac_vf_f16m2(vd, rs1, vs2, vl);
}
449
450 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4(
451 // CHECK-RV64-NEXT: entry:
452 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
453 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
454 //
vfloat16m4_t test_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
  return vfmsac_vv_f16m4(vd, vs1, vs2, vl);
}
458
459 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4(
460 // CHECK-RV64-NEXT: entry:
461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
462 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
463 //
vfloat16m4_t test_vfmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
  return vfmsac_vf_f16m4(vd, rs1, vs2, vl);
}
467
468 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8(
469 // CHECK-RV64-NEXT: entry:
470 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
471 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
472 //
vfloat16m8_t test_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
  return vfmsac_vv_f16m8(vd, vs1, vs2, vl);
}
476
477 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8(
478 // CHECK-RV64-NEXT: entry:
479 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
480 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
481 //
vfloat16m8_t test_vfmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
  return vfmsac_vf_f16m8(vd, rs1, vs2, vl);
}
485
486 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m(
487 // CHECK-RV64-NEXT: entry:
488 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
489 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
490 //
vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
  return vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
}
494
495 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m(
496 // CHECK-RV64-NEXT: entry:
497 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
498 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
499 //
vfloat16mf4_t test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
  return vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
}
503
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
// Masked vector-vector form, f16 at LMUL=1/2 (nxv2f16 elements, nxv2i1 mask).
vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
  return vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
}
512
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
// Masked vector-scalar form, f16 at LMUL=1/2.
vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
  return vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
}
521
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
// Masked vector-vector form, f16 at LMUL=1 (nxv4f16 elements, nxv4i1 mask).
vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
  return vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl);
}
530
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
// Masked vector-scalar form, f16 at LMUL=1.
vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
  return vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl);
}
539
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// Masked vector-vector form, f16 at LMUL=2 (nxv8f16 elements, nxv8i1 mask).
vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
  return vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl);
}
548
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// Masked vector-scalar form, f16 at LMUL=2.
vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
  return vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl);
}
557
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
// Masked vector-vector form, f16 at LMUL=4 (nxv16f16 elements, nxv16i1 mask).
vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
  return vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl);
}
566
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
// Masked vector-scalar form, f16 at LMUL=4.
vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
  return vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl);
}
575
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
// Masked vector-vector form, f16 at LMUL=8 (nxv32f16 elements, nxv32i1 mask).
vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
  return vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl);
}
584
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
// Masked vector-scalar form, f16 at LMUL=8 — the widest f16 configuration
// exercised in this group.
vfloat16m8_t test_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
  return vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl);
}
593