1 // REQUIRES: aarch64-registered-target
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
3 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null 2>%t
5 // RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
6
7 // If this check fails please read test/CodeGen/aarch64-sve-intrinsics/README for instructions on how to resolve it.
8 // ASM-NOT: warning
9 #include <arm_sve.h>
10
11 #ifdef SVE_OVERLOADED_FORMS
12 // A simple used,unused... macro, long enough to represent any SVE builtin.
13 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
14 #else
15 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
16 #endif
17
test_svdup_n_s8(int8_t op)18 svint8_t test_svdup_n_s8(int8_t op)
19 {
20 // CHECK-LABEL: test_svdup_n_s8
21 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
22 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
23 return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
24 }
25
test_svdup_n_s16(int16_t op)26 svint16_t test_svdup_n_s16(int16_t op)
27 {
28 // CHECK-LABEL: test_svdup_n_s16
29 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
30 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
31 return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
32 }
33
test_svdup_n_s32(int32_t op)34 svint32_t test_svdup_n_s32(int32_t op)
35 {
36 // CHECK-LABEL: test_svdup_n_s32
37 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
38 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
39 return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
40 }
41
test_svdup_n_s64(int64_t op)42 svint64_t test_svdup_n_s64(int64_t op)
43 {
44 // CHECK-LABEL: test_svdup_n_s64
45 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
46 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
47 return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
48 }
49
test_svdup_n_u8(uint8_t op)50 svuint8_t test_svdup_n_u8(uint8_t op)
51 {
52 // CHECK-LABEL: test_svdup_n_u8
53 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
54 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
55 return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
56 }
57
test_svdup_n_u16(uint16_t op)58 svuint16_t test_svdup_n_u16(uint16_t op)
59 {
60 // CHECK-LABEL: test_svdup_n_u16
61 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
62 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
63 return SVE_ACLE_FUNC(svdup,_n,_u16,)(op);
64 }
65
test_svdup_n_u32(uint32_t op)66 svuint32_t test_svdup_n_u32(uint32_t op)
67 {
68 // CHECK-LABEL: test_svdup_n_u32
69 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
70 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
71 return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
72 }
73
test_svdup_n_u64(uint64_t op)74 svuint64_t test_svdup_n_u64(uint64_t op)
75 {
76 // CHECK-LABEL: test_svdup_n_u64
77 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
78 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
79 return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
80 }
81
test_svdup_n_f16(float16_t op)82 svfloat16_t test_svdup_n_f16(float16_t op)
83 {
84 // CHECK-LABEL: test_svdup_n_f16
85 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op)
86 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
87 return SVE_ACLE_FUNC(svdup,_n,_f16,)(op);
88 }
89
test_svdup_n_f32(float32_t op)90 svfloat32_t test_svdup_n_f32(float32_t op)
91 {
92 // CHECK-LABEL: test_svdup_n_f32
93 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op)
94 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
95 return SVE_ACLE_FUNC(svdup,_n,_f32,)(op);
96 }
97
test_svdup_n_f64(float64_t op)98 svfloat64_t test_svdup_n_f64(float64_t op)
99 {
100 // CHECK-LABEL: test_svdup_n_f64
101 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op)
102 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
103 return SVE_ACLE_FUNC(svdup,_n,_f64,)(op);
104 }
105
test_svdup_n_s8_z(svbool_t pg,int8_t op)106 svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op)
107 {
108 // CHECK-LABEL: test_svdup_n_s8_z
109 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
110 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
111 return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
112 }
113
test_svdup_n_s16_z(svbool_t pg,int16_t op)114 svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op)
115 {
116 // CHECK-LABEL: test_svdup_n_s16_z
117 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
118 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
119 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
120 return SVE_ACLE_FUNC(svdup,_n,_s16_z,)(pg, op);
121 }
122
test_svdup_n_s32_z(svbool_t pg,int32_t op)123 svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op)
124 {
125 // CHECK-LABEL: test_svdup_n_s32_z
126 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
127 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
128 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
129 return SVE_ACLE_FUNC(svdup,_n,_s32_z,)(pg, op);
130 }
131
test_svdup_n_s64_z(svbool_t pg,int64_t op)132 svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op)
133 {
134 // CHECK-LABEL: test_svdup_n_s64_z
135 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
136 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
137 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
138 return SVE_ACLE_FUNC(svdup,_n,_s64_z,)(pg, op);
139 }
140
test_svdup_n_u8_z(svbool_t pg,uint8_t op)141 svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op)
142 {
143 // CHECK-LABEL: test_svdup_n_u8_z
144 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
145 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
146 return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
147 }
148
test_svdup_n_u16_z(svbool_t pg,uint16_t op)149 svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op)
150 {
151 // CHECK-LABEL: test_svdup_n_u16_z
152 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
153 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
154 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
155 return SVE_ACLE_FUNC(svdup,_n,_u16_z,)(pg, op);
156 }
157
test_svdup_n_u32_z(svbool_t pg,uint32_t op)158 svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op)
159 {
160 // CHECK-LABEL: test_svdup_n_u32_z
161 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
162 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
163 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
164 return SVE_ACLE_FUNC(svdup,_n,_u32_z,)(pg, op);
165 }
166
test_svdup_n_u64_z(svbool_t pg,uint64_t op)167 svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op)
168 {
169 // CHECK-LABEL: test_svdup_n_u64_z
170 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
171 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
172 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
173 return SVE_ACLE_FUNC(svdup,_n,_u64_z,)(pg, op);
174 }
175
test_svdup_n_f16_z(svbool_t pg,float16_t op)176 svfloat16_t test_svdup_n_f16_z(svbool_t pg, float16_t op)
177 {
178 // CHECK-LABEL: test_svdup_n_f16_z
179 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
180 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %[[PG]], half %op)
181 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
182 return SVE_ACLE_FUNC(svdup,_n,_f16_z,)(pg, op);
183 }
184
test_svdup_n_f32_z(svbool_t pg,float32_t op)185 svfloat32_t test_svdup_n_f32_z(svbool_t pg, float32_t op)
186 {
187 // CHECK-LABEL: test_svdup_n_f32_z
188 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
189 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %[[PG]], float %op)
190 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
191 return SVE_ACLE_FUNC(svdup,_n,_f32_z,)(pg, op);
192 }
193
test_svdup_n_f64_z(svbool_t pg,float64_t op)194 svfloat64_t test_svdup_n_f64_z(svbool_t pg, float64_t op)
195 {
196 // CHECK-LABEL: test_svdup_n_f64_z
197 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
198 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %[[PG]], double %op)
199 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
200 return SVE_ACLE_FUNC(svdup,_n,_f64_z,)(pg, op);
201 }
202
svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, int8_t op)
{
  // CHECK-LABEL: test_svdup_n_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
}
210
svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t pg, int16_t op)
{
  // CHECK-LABEL: test_svdup_n_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s16_m,)(inactive, pg, op);
}
219
svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t pg, int32_t op)
{
  // CHECK-LABEL: test_svdup_n_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s32_m,)(inactive, pg, op);
}
228
svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t pg, int64_t op)
{
  // CHECK-LABEL: test_svdup_n_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_s64_m,)(inactive, pg, op);
}
237
svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t pg, uint8_t op)
{
  // CHECK-LABEL: test_svdup_n_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u8_m,)(inactive, pg, op);
}
245
svuint16_t test_svdup_n_u16_m(svuint16_t inactive, svbool_t pg, uint16_t op)
{
  // CHECK-LABEL: test_svdup_n_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u16_m,)(inactive, pg, op);
}
254
svuint32_t test_svdup_n_u32_m(svuint32_t inactive, svbool_t pg, uint32_t op)
{
  // CHECK-LABEL: test_svdup_n_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u32_m,)(inactive, pg, op);
}
263
svuint64_t test_svdup_n_u64_m(svuint64_t inactive, svbool_t pg, uint64_t op)
{
  // CHECK-LABEL: test_svdup_n_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_u64_m,)(inactive, pg, op);
}
272
svfloat16_t test_svdup_n_f16_m(svfloat16_t inactive, svbool_t pg, float16_t op)
{
  // CHECK-LABEL: test_svdup_n_f16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %inactive, <vscale x 8 x i1> %[[PG]], half %op)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f16_m,)(inactive, pg, op);
}
281
svfloat32_t test_svdup_n_f32_m(svfloat32_t inactive, svbool_t pg, float32_t op)
{
  // CHECK-LABEL: test_svdup_n_f32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %inactive, <vscale x 4 x i1> %[[PG]], float %op)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f32_m,)(inactive, pg, op);
}
290
svfloat64_t test_svdup_n_f64_m(svfloat64_t inactive, svbool_t pg, float64_t op)
{
  // CHECK-LABEL: test_svdup_n_f64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %inactive, <vscale x 2 x i1> %[[PG]], double %op)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup,_n,_f64_m,)(inactive, pg, op);
}
299
test_svdup_n_s8_x(svbool_t pg,int8_t op)300 svint8_t test_svdup_n_s8_x(svbool_t pg, int8_t op)
301 {
302 // CHECK-LABEL: test_svdup_n_s8_x
303 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
304 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
305 return SVE_ACLE_FUNC(svdup,_n,_s8_x,)(pg, op);
306 }
307
test_svdup_n_s16_x(svbool_t pg,int16_t op)308 svint16_t test_svdup_n_s16_x(svbool_t pg, int16_t op)
309 {
310 // CHECK-LABEL: test_svdup_n_s16_x
311 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
312 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
313 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
314 return SVE_ACLE_FUNC(svdup,_n,_s16_x,)(pg, op);
315 }
316
test_svdup_n_s32_x(svbool_t pg,int32_t op)317 svint32_t test_svdup_n_s32_x(svbool_t pg, int32_t op)
318 {
319 // CHECK-LABEL: test_svdup_n_s32_x
320 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
321 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
322 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
323 return SVE_ACLE_FUNC(svdup,_n,_s32_x,)(pg, op);
324 }
325
test_svdup_n_s64_x(svbool_t pg,int64_t op)326 svint64_t test_svdup_n_s64_x(svbool_t pg, int64_t op)
327 {
328 // CHECK-LABEL: test_svdup_n_s64_x
329 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
330 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
331 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
332 return SVE_ACLE_FUNC(svdup,_n,_s64_x,)(pg, op);
333 }
334
test_svdup_n_u8_x(svbool_t pg,uint8_t op)335 svuint8_t test_svdup_n_u8_x(svbool_t pg, uint8_t op)
336 {
337 // CHECK-LABEL: test_svdup_n_u8_x
338 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
339 // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
340 return SVE_ACLE_FUNC(svdup,_n,_u8_x,)(pg, op);
341 }
342
test_svdup_n_u16_x(svbool_t pg,uint16_t op)343 svuint16_t test_svdup_n_u16_x(svbool_t pg, uint16_t op)
344 {
345 // CHECK-LABEL: test_svdup_n_u16_x
346 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
347 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
348 // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
349 return SVE_ACLE_FUNC(svdup,_n,_u16_x,)(pg, op);
350 }
351
test_svdup_n_u32_x(svbool_t pg,uint32_t op)352 svuint32_t test_svdup_n_u32_x(svbool_t pg, uint32_t op)
353 {
354 // CHECK-LABEL: test_svdup_n_u32_x
355 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
356 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
357 // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
358 return SVE_ACLE_FUNC(svdup,_n,_u32_x,)(pg, op);
359 }
360
test_svdup_n_u64_x(svbool_t pg,uint64_t op)361 svuint64_t test_svdup_n_u64_x(svbool_t pg, uint64_t op)
362 {
363 // CHECK-LABEL: test_svdup_n_u64_x
364 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
365 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
366 // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
367 return SVE_ACLE_FUNC(svdup,_n,_u64_x,)(pg, op);
368 }
369
test_svdup_n_f16_x(svbool_t pg,float16_t op)370 svfloat16_t test_svdup_n_f16_x(svbool_t pg, float16_t op)
371 {
372 // CHECK-LABEL: test_svdup_n_f16_x
373 // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
374 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %[[PG]], half %op)
375 // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
376 return SVE_ACLE_FUNC(svdup,_n,_f16_x,)(pg, op);
377 }
378
test_svdup_n_f32_x(svbool_t pg,float32_t op)379 svfloat32_t test_svdup_n_f32_x(svbool_t pg, float32_t op)
380 {
381 // CHECK-LABEL: test_svdup_n_f32_x
382 // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
383 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %[[PG]], float %op)
384 // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
385 return SVE_ACLE_FUNC(svdup,_n,_f32_x,)(pg, op);
386 }
387
test_svdup_n_f64_x(svbool_t pg,float64_t op)388 svfloat64_t test_svdup_n_f64_x(svbool_t pg, float64_t op)
389 {
390 // CHECK-LABEL: test_svdup_n_f64_x
391 // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
392 // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %[[PG]], double %op)
393 // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
394 return SVE_ACLE_FUNC(svdup,_n,_f64_x,)(pg, op);
395 }
396
svint8_t test_svdup_lane_s8(svint8_t data, uint8_t index)
{
  // CHECK-LABEL: test_svdup_lane_s8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s8,,)(data, index);
}
405
svint16_t test_svdup_lane_s16(svint16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_s16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s16,,)(data, index);
}
414
svint32_t test_svdup_lane_s32(svint32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_s32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s32,,)(data, index);
}
423
svint64_t test_svdup_lane_s64(svint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_s64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_s64,,)(data, index);
}
432
svuint8_t test_svdup_lane_u8(svuint8_t data, uint8_t index)
{
  // CHECK-LABEL: test_svdup_lane_u8
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u8,,)(data, index);
}
441
svuint16_t test_svdup_lane_u16(svuint16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_u16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u16,,)(data, index);
}
450
svuint32_t test_svdup_lane_u32(svuint32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_u32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u32,,)(data, index);
}
459
svuint64_t test_svdup_lane_u64(svuint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_u64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_u64,,)(data, index);
}
468
svfloat16_t test_svdup_lane_f16(svfloat16_t data, uint16_t index)
{
  // CHECK-LABEL: test_svdup_lane_f16
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f16,,)(data, index);
}
477
svfloat32_t test_svdup_lane_f32(svfloat32_t data, uint32_t index)
{
  // CHECK-LABEL: test_svdup_lane_f32
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f32,,)(data, index);
}
486
svfloat64_t test_svdup_lane_f64(svfloat64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdup_lane_f64
  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdup_lane,_f64,,)(data, index);
}
495
test_svdup_n_b8(bool op)496 svbool_t test_svdup_n_b8(bool op)
497 {
498 // CHECK-LABEL: test_svdup_n_b8
499 // CHECK: %[[DUP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.dup.x.nxv16i1(i1 %op)
500 // CHECK: ret <vscale x 16 x i1> %[[DUP]]
501 return SVE_ACLE_FUNC(svdup,_n,_b8,)(op);
502 }
503
test_svdup_n_b16(bool op)504 svbool_t test_svdup_n_b16(bool op)
505 {
506 // CHECK-LABEL: test_svdup_n_b16
507 // CHECK: %[[DUP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.dup.x.nxv8i1(i1 %op)
508 // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[DUP]])
509 // CHECK: ret <vscale x 16 x i1> %[[CVT]]
510 return SVE_ACLE_FUNC(svdup,_n,_b16,)(op);
511 }
512
test_svdup_n_b32(bool op)513 svbool_t test_svdup_n_b32(bool op)
514 {
515 // CHECK-LABEL: test_svdup_n_b32
516 // CHECK: %[[DUP:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.dup.x.nxv4i1(i1 %op)
517 // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[DUP]])
518 // CHECK: ret <vscale x 16 x i1> %[[CVT]]
519 return SVE_ACLE_FUNC(svdup,_n,_b32,)(op);
520 }
521
test_svdup_n_b64(bool op)522 svbool_t test_svdup_n_b64(bool op)
523 {
524 // CHECK-LABEL: test_svdup_n_b64
525 // CHECK: %[[DUP:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.dup.x.nxv2i1(i1 %op)
526 // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[DUP]])
527 // CHECK: ret <vscale x 16 x i1> %[[CVT]]
528 return SVE_ACLE_FUNC(svdup,_n,_b64,)(op);
529 }
530