// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
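// Illustrative expansion (an aside, not part of the FileCheck patterns): with
// SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svcnt,_s8,_z,) pastes A1##A3 to
// produce the overloaded name svcnt_z; without it, all four arguments are
// pasted to produce the fully suffixed name svcnt_s8_z.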
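// Zeroing forms (_z): inactive lanes are zeroed, which appears below as the
// zeroinitializer first operand of the llvm.aarch64.sve.cnt calls.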
svuint8_t test_svcnt_s8_z(svbool_t pg, svint8_t op)
{
  // CHECK-LABEL: test_svcnt_s8_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s8,_z,)(pg, op);
}

svuint16_t test_svcnt_s16_z(svbool_t pg, svint16_t op)
{
  // CHECK-LABEL: test_svcnt_s16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s16,_z,)(pg, op);
}

svuint32_t test_svcnt_s32_z(svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svcnt_s32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s32,_z,)(pg, op);
}

svuint64_t test_svcnt_s64_z(svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svcnt_s64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s64,_z,)(pg, op);
}

svuint8_t test_svcnt_u8_z(svbool_t pg, svuint8_t op)
{
  // CHECK-LABEL: test_svcnt_u8_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u8,_z,)(pg, op);
}

svuint16_t test_svcnt_u16_z(svbool_t pg, svuint16_t op)
{
  // CHECK-LABEL: test_svcnt_u16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u16,_z,)(pg, op);
}

svuint32_t test_svcnt_u32_z(svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svcnt_u32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u32,_z,)(pg, op);
}

svuint64_t test_svcnt_u64_z(svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svcnt_u64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u64,_z,)(pg, op);
}

svuint16_t test_svcnt_f16_z(svbool_t pg, svfloat16_t op)
{
  // CHECK-LABEL: test_svcnt_f16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f16,_z,)(pg, op);
}

svuint32_t test_svcnt_f32_z(svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svcnt_f32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f32,_z,)(pg, op);
}

svuint64_t test_svcnt_f64_z(svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svcnt_f64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f64,_z,)(pg, op);
}

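// Merging forms (_m): inactive lanes take their value from the extra
// 'inactive' argument, which is passed through as the first operand of the
// llvm.aarch64.sve.cnt calls.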
svuint8_t test_svcnt_s8_m(svuint8_t inactive, svbool_t pg, svint8_t op)
{
  // CHECK-LABEL: test_svcnt_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s8,_m,)(inactive, pg, op);
}

svuint16_t test_svcnt_s16_m(svuint16_t inactive, svbool_t pg, svint16_t op)
{
  // CHECK-LABEL: test_svcnt_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s16,_m,)(inactive, pg, op);
}

svuint32_t test_svcnt_s32_m(svuint32_t inactive, svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svcnt_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s32,_m,)(inactive, pg, op);
}

svuint64_t test_svcnt_s64_m(svuint64_t inactive, svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svcnt_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s64,_m,)(inactive, pg, op);
}

svuint8_t test_svcnt_u8_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
{
  // CHECK-LABEL: test_svcnt_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u8,_m,)(inactive, pg, op);
}

svuint16_t test_svcnt_u16_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
{
  // CHECK-LABEL: test_svcnt_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u16,_m,)(inactive, pg, op);
}

svuint32_t test_svcnt_u32_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svcnt_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u32,_m,)(inactive, pg, op);
}

svuint64_t test_svcnt_u64_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svcnt_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u64,_m,)(inactive, pg, op);
}

svuint16_t test_svcnt_f16_m(svuint16_t inactive, svbool_t pg, svfloat16_t op)
{
  // CHECK-LABEL: test_svcnt_f16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8f16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f16,_m,)(inactive, pg, op);
}

svuint32_t test_svcnt_f32_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svcnt_f32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4f32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f32,_m,)(inactive, pg, op);
}

svuint64_t test_svcnt_f64_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svcnt_f64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2f64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f64,_m,)(inactive, pg, op);
}

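// Don't-care forms (_x): the value of inactive lanes is unspecified, which
// appears below as an undef first operand of the llvm.aarch64.sve.cnt calls.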
svuint8_t test_svcnt_s8_x(svbool_t pg, svint8_t op)
{
  // CHECK-LABEL: test_svcnt_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s8,_x,)(pg, op);
}

svuint16_t test_svcnt_s16_x(svbool_t pg, svint16_t op)
{
  // CHECK-LABEL: test_svcnt_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s16,_x,)(pg, op);
}

svuint32_t test_svcnt_s32_x(svbool_t pg, svint32_t op)
{
  // CHECK-LABEL: test_svcnt_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s32,_x,)(pg, op);
}

svuint64_t test_svcnt_s64_x(svbool_t pg, svint64_t op)
{
  // CHECK-LABEL: test_svcnt_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_s64,_x,)(pg, op);
}

svuint8_t test_svcnt_u8_x(svbool_t pg, svuint8_t op)
{
  // CHECK-LABEL: test_svcnt_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u8,_x,)(pg, op);
}

svuint16_t test_svcnt_u16_x(svbool_t pg, svuint16_t op)
{
  // CHECK-LABEL: test_svcnt_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u16,_x,)(pg, op);
}

svuint32_t test_svcnt_u32_x(svbool_t pg, svuint32_t op)
{
  // CHECK-LABEL: test_svcnt_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u32,_x,)(pg, op);
}

svuint64_t test_svcnt_u64_x(svbool_t pg, svuint64_t op)
{
  // CHECK-LABEL: test_svcnt_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_u64,_x,)(pg, op);
}

svuint16_t test_svcnt_f16_x(svbool_t pg, svfloat16_t op)
{
  // CHECK-LABEL: test_svcnt_f16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8f16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f16,_x,)(pg, op);
}

svuint32_t test_svcnt_f32_x(svbool_t pg, svfloat32_t op)
{
  // CHECK-LABEL: test_svcnt_f32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4f32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f32,_x,)(pg, op);
}

svuint64_t test_svcnt_f64_x(svbool_t pg, svfloat64_t op)
{
  // CHECK-LABEL: test_svcnt_f64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2f64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svcnt,_f64,_x,)(pg, op);
}