// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
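
// vfslide1up.vf slides each element of the source vector up by one lane and
// inserts the scalar operand at element 0: result[0] = value and
// result[i] = src[i-1] for 0 < i < vl. The _m variants additionally take a
// mask and a maskedoff vector that supplies the inactive elements.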

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
                                        size_t vl) {
  return vfslide1up_vf_f32mf2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
                                      size_t vl) {
  return vfslide1up_vf_f32m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
                                      size_t vl) {
  return vfslide1up_vf_f32m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
                                      size_t vl) {
  return vfslide1up_vf_f32m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
                                      size_t vl) {
  return vfslide1up_vf_f32m8(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
                                      size_t vl) {
  return vfslide1up_vf_f64m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
                                      size_t vl) {
  return vfslide1up_vf_f64m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
                                      size_t vl) {
  return vfslide1up_vf_f64m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
                                      size_t vl) {
  return vfslide1up_vf_f64m8(src, value, vl);
}

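// Masked (_m) variants: inactive result elements are taken from maskedoff.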
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
                                          vfloat32mf2_t maskedoff,
                                          vfloat32mf2_t src, float value,
                                          size_t vl) {
  return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                        vfloat32m1_t src, float value,
                                        size_t vl) {
  return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                        vfloat32m2_t src, float value,
                                        size_t vl) {
  return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                        vfloat32m4_t src, float value,
                                        size_t vl) {
  return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                        vfloat32m8_t src, float value,
                                        size_t vl) {
  return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                        vfloat64m1_t src, double value,
                                        size_t vl) {
  return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                        vfloat64m2_t src, double value,
                                        size_t vl) {
  return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                        vfloat64m4_t src, double value,
                                        size_t vl) {
  return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                        vfloat64m8_t src, double value,
                                        size_t vl) {
  return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl);
}
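
// Illustrative usage sketch (not part of the autogenerated assertions; the
// helper is static inline and never referenced, so no IR is emitted for it
// and the FileCheck lines above are unaffected). vfslide1up is a natural fit
// for pushing the newest sample into a sliding window held in a vector
// register: lane 0 receives the scalar and lanes 1..vl-1 receive the previous
// window contents. The helper name is hypothetical.
static inline vfloat32m1_t push_sample_f32m1(vfloat32m1_t window, float sample,
                                             size_t vl) {
  return vfslide1up_vf_f32m1(window, sample, vl);
}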