// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
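
// vfrsub.vf computes vd[i] = rs1 - vs2[i]: the scalar operand minus each
// element of the vector operand (the reverse operand order of vfsub.vf).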

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16mf4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m8(op1, op2, vl);
}

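// Masked (_m) variants: inactive elements are taken from the maskedoff
// operand; the mask appears as the <vscale x N x i1> operand in the
// llvm.riscv.vfrsub.mask intrinsics checked below.
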
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
  return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}

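// Tail-policy (_mt) variants additionally take an explicit tail policy
// argument; these tests pass VE_TAIL_AGNOSTIC, which appears as the trailing
// 'i64 1' operand in the checked IR.
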
// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_mt(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
  return vfrsub_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}