// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
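
// For reference, the macro simply pastes its arguments together, so each test
// exercises both spellings of a builtin. Taking the first test as an example:
//   without SVE_OVERLOADED_FORMS: SVE_ACLE_FUNC(svdup,_n,_s8,)(op) -> svdup_n_s8(op)
//   with SVE_OVERLOADED_FORMS:    SVE_ACLE_FUNC(svdup,_n,_s8,)(op) -> svdup_s8(op)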

svint8_t test_svdup_n_s8(int8_t op)
{
  // CHECK-LABEL: test_svdup_n_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
}

svint16_t test_svdup_n_s16(int16_t op)
{
  // CHECK-LABEL: test_svdup_n_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
}

svint32_t test_svdup_n_s32(int32_t op)
{
  // CHECK-LABEL: test_svdup_n_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
}

svint64_t test_svdup_n_s64(int64_t op)
{
  // CHECK-LABEL: test_svdup_n_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
}

svuint8_t test_svdup_n_u8(uint8_t op)
{
  // CHECK-LABEL: test_svdup_n_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
}

svuint16_t test_svdup_n_u16(uint16_t op)
{
  // CHECK-LABEL: test_svdup_n_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u16,)(op);
}

svuint32_t test_svdup_n_u32(uint32_t op)
{
  // CHECK-LABEL: test_svdup_n_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
}

svuint64_t test_svdup_n_u64(uint64_t op)
{
  // CHECK-LABEL: test_svdup_n_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
}

svfloat16_t test_svdup_n_f16(float16_t op)
{
  // CHECK-LABEL: test_svdup_n_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f16,)(op);
}

svfloat32_t test_svdup_n_f32(float32_t op)
{
  // CHECK-LABEL: test_svdup_n_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f32,)(op);
}

svfloat64_t test_svdup_n_f64(float64_t op)
{
  // CHECK-LABEL: test_svdup_n_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f64,)(op);
}

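// Zeroing forms (_z): inactive lanes of the result are set to zero, modeled
// as a zeroinitializer merge operand. For elements wider than 8 bits the
// svbool_t predicate is first narrowed with convert.from.svbool; the 8-bit
// forms use the 16 x i1 predicate directly.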
svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op)
{
  // CHECK-LABEL: test_svdup_n_s8_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
}

svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op)
{
  // CHECK-LABEL: test_svdup_n_s16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s16_z,)(pg, op);
}

svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op)
{
  // CHECK-LABEL: test_svdup_n_s32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s32_z,)(pg, op);
}

svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op)
{
  // CHECK-LABEL: test_svdup_n_s64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s64_z,)(pg, op);
}

svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op)
{
  // CHECK-LABEL: test_svdup_n_u8_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
}

svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op)
{
  // CHECK-LABEL: test_svdup_n_u16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u16_z,)(pg, op);
}

svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op)
{
  // CHECK-LABEL: test_svdup_n_u32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u32_z,)(pg, op);
}

svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op)
{
  // CHECK-LABEL: test_svdup_n_u64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u64_z,)(pg, op);
}

svfloat16_t test_svdup_n_f16_z(svbool_t pg, float16_t op)
{
  // CHECK-LABEL: test_svdup_n_f16_z
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %[[PG]], half %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f16_z,)(pg, op);
}

svfloat32_t test_svdup_n_f32_z(svbool_t pg, float32_t op)
{
  // CHECK-LABEL: test_svdup_n_f32_z
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %[[PG]], float %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f32_z,)(pg, op);
}

svfloat64_t test_svdup_n_f64_z(svbool_t pg, float64_t op)
{
  // CHECK-LABEL: test_svdup_n_f64_z
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %[[PG]], double %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f64_z,)(pg, op);
}

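// Merging forms (_m): inactive lanes of the result are taken from the
// explicit 'inactive' argument, which is passed through as the first
// operand of the sve.dup intrinsic.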
svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, int8_t op)
{
  // CHECK-LABEL: test_svdup_n_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
}

svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t pg, int16_t op)
{
  // CHECK-LABEL: test_svdup_n_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s16_m,)(inactive, pg, op);
}

svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t pg, int32_t op)
{
  // CHECK-LABEL: test_svdup_n_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s32_m,)(inactive, pg, op);
}

svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t pg, int64_t op)
{
  // CHECK-LABEL: test_svdup_n_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s64_m,)(inactive, pg, op);
}

svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t pg, uint8_t op)
{
  // CHECK-LABEL: test_svdup_n_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u8_m,)(inactive, pg, op);
}

svuint16_t test_svdup_n_u16_m(svuint16_t inactive, svbool_t pg, uint16_t op)
{
  // CHECK-LABEL: test_svdup_n_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u16_m,)(inactive, pg, op);
}

svuint32_t test_svdup_n_u32_m(svuint32_t inactive, svbool_t pg, uint32_t op)
{
  // CHECK-LABEL: test_svdup_n_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u32_m,)(inactive, pg, op);
}

svuint64_t test_svdup_n_u64_m(svuint64_t inactive, svbool_t pg, uint64_t op)
{
  // CHECK-LABEL: test_svdup_n_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u64_m,)(inactive, pg, op);
}

svfloat16_t test_svdup_n_f16_m(svfloat16_t inactive, svbool_t pg, float16_t op)
{
  // CHECK-LABEL: test_svdup_n_f16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %inactive, <vscale x 8 x i1> %[[PG]], half %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f16_m,)(inactive, pg, op);
}

svfloat32_t test_svdup_n_f32_m(svfloat32_t inactive, svbool_t pg, float32_t op)
{
  // CHECK-LABEL: test_svdup_n_f32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %inactive, <vscale x 4 x i1> %[[PG]], float %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f32_m,)(inactive, pg, op);
}

svfloat64_t test_svdup_n_f64_m(svfloat64_t inactive, svbool_t pg, float64_t op)
{
  // CHECK-LABEL: test_svdup_n_f64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %inactive, <vscale x 2 x i1> %[[PG]], double %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f64_m,)(inactive, pg, op);
}

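// Don't-care forms (_x): inactive lanes of the result are unspecified,
// which codegen models by passing undef as the merge operand.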
svint8_t test_svdup_n_s8_x(svbool_t pg, int8_t op)
{
  // CHECK-LABEL: test_svdup_n_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s8_x,)(pg, op);
}

svint16_t test_svdup_n_s16_x(svbool_t pg, int16_t op)
{
  // CHECK-LABEL: test_svdup_n_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s16_x,)(pg, op);
}

svint32_t test_svdup_n_s32_x(svbool_t pg, int32_t op)
{
  // CHECK-LABEL: test_svdup_n_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s32_x,)(pg, op);
}

svint64_t test_svdup_n_s64_x(svbool_t pg, int64_t op)
{
  // CHECK-LABEL: test_svdup_n_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s64_x,)(pg, op);
}

svuint8_t test_svdup_n_u8_x(svbool_t pg, uint8_t op)
{
  // CHECK-LABEL: test_svdup_n_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u8_x,)(pg, op);
}

svuint16_t test_svdup_n_u16_x(svbool_t pg, uint16_t op)
{
  // CHECK-LABEL: test_svdup_n_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u16_x,)(pg, op);
}

svuint32_t test_svdup_n_u32_x(svbool_t pg, uint32_t op)
{
  // CHECK-LABEL: test_svdup_n_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u32_x,)(pg, op);
}

svuint64_t test_svdup_n_u64_x(svbool_t pg, uint64_t op)
{
  // CHECK-LABEL: test_svdup_n_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u64_x,)(pg, op);
}

svfloat16_t test_svdup_n_f16_x(svbool_t pg, float16_t op)
{
  // CHECK-LABEL: test_svdup_n_f16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %[[PG]], half %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f16_x,)(pg, op);
}

svfloat32_t test_svdup_n_f32_x(svbool_t pg, float32_t op)
{
  // CHECK-LABEL: test_svdup_n_f32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %[[PG]], float %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f32_x,)(pg, op);
}

svfloat64_t test_svdup_n_f64_x(svbool_t pg, float64_t op)
{
  // CHECK-LABEL: test_svdup_n_f64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %[[PG]], double %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f64_x,)(pg, op);
}

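// svdup_lane broadcasts the element at 'index' to every lane: the scalar
// index is splatted with dup.x and the data vector is then permuted with
// tbl. (With SVE_OVERLOADED_FORMS the call is simply svdup_lane(data, index).)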
svint8_t test_svdup_lane_s8(svint8_t data, uint8_t index)
{
  // CHECK-LABEL: test_svdup_lane_s8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s8,,)(data, index);
}

svint16_t test_svdup_lane_s16(svint16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_s16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s16,,)(data, index);
}

svint32_t test_svdup_lane_s32(svint32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_s32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s32,,)(data, index);
}

svint64_t test_svdup_lane_s64(svint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_s64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s64,,)(data, index);
}

svuint8_t test_svdup_lane_u8(svuint8_t data, uint8_t index)
{
  // CHECK-LABEL: test_svdup_lane_u8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u8,,)(data, index);
}

svuint16_t test_svdup_lane_u16(svuint16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_u16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u16,,)(data, index);
}

svuint32_t test_svdup_lane_u32(svuint32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_u32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u32,,)(data, index);
}

svuint64_t test_svdup_lane_u64(svuint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_u64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u64,,)(data, index);
}

svfloat16_t test_svdup_lane_f16(svfloat16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_f16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f16,,)(data, index);
}

svfloat32_t test_svdup_lane_f32(svfloat32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_f32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f32,,)(data, index);
}

svfloat64_t test_svdup_lane_f64(svfloat64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_f64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f64,,)(data, index);
}

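// Predicate forms (svdup_n_b*): the scalar bool is splatted at the requested
// element width; all widths other than b8 are then widened back to the
// 16 x i1 svbool_t container with convert.to.svbool.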
svbool_t test_svdup_n_b8(bool op)
{
  // CHECK-LABEL: test_svdup_n_b8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.dup.x.nxv16i1(i1 %op)
  // CHECK: ret <vscale x 16 x i1> %[[DUP]]
  return SVE_ACLE_FUNC(svdup,_n,_b8,)(op);
}

svbool_t test_svdup_n_b16(bool op)
{
  // CHECK-LABEL: test_svdup_n_b16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.dup.x.nxv8i1(i1 %op)
  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[DUP]])
  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
  return SVE_ACLE_FUNC(svdup,_n,_b16,)(op);
}

svbool_t test_svdup_n_b32(bool op)
{
  // CHECK-LABEL: test_svdup_n_b32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.dup.x.nxv4i1(i1 %op)
  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[DUP]])
  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
  return SVE_ACLE_FUNC(svdup,_n,_b32,)(op);
}

svbool_t test_svdup_n_b64(bool op)
{
  // CHECK-LABEL: test_svdup_n_b64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.dup.x.nxv2i1(i1 %op)
  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[DUP]])
  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
  return SVE_ACLE_FUNC(svdup,_n,_b64,)(op);
}