// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
  return vfsqrt_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
  return vfsqrt_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
  return vfsqrt_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
  return vfsqrt_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
  return vfsqrt_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
  return vfsqrt_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
  return vfsqrt_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
  return vfsqrt_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
  return vfsqrt_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, size_t vl) {
  return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, size_t vl) {
  return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, size_t vl) {
  return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, size_t vl) {
  return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, size_t vl) {
  return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, size_t vl) {
  return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, size_t vl) {
  return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, size_t vl) {
  return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, size_t vl) {
  return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl);
}