// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null
#include <arm_sve.h>
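// Tests IR code generation for the svld1sh* ACLE intrinsics: predicated loads
// of 16-bit elements that are sign-extended to 32-bit or 64-bit vector
// elements, in contiguous, vnum, and gather (vector base, offset, and index)
// forms. The last RUN line only checks that assembly generation succeeds
// under -Werror; its output is discarded.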

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
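// For example, SVE_ACLE_FUNC(svld1sh_gather, _u32base, _s32, ) expands to the
// overloaded name svld1sh_gather_s32 when SVE_OVERLOADED_FORMS is defined, and
// to the fully-suffixed name svld1sh_gather_u32base_s32 otherwise.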

svint32_t test_svld1sh_s32(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svld1sh_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svld1sh_s32(pg, base);
}

svint64_t test_svld1sh_s64(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svld1sh_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svld1sh_s64(pg, base);
}

svuint32_t test_svld1sh_u32(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svld1sh_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svld1sh_u32(pg, base);
}

svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svld1sh_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svld1sh_u64(pg, base);
}

svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1sh_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svld1sh_vnum_s32(pg, base, vnum);
}

svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1sh_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svld1sh_vnum_s64(pg, base, vnum);
}

svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1sh_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svld1sh_vnum_u32(pg, base, vnum);
}

svuint64_t test_svld1sh_vnum_u64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svld1sh_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svld1sh_vnum_u64(pg, base, vnum);
}

svint32_t test_svld1sh_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _s32, )(pg, bases);
}

svint64_t test_svld1sh_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _s64, )(pg, bases);
}

svuint32_t test_svld1sh_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _u32, )(pg, bases);
}

svuint64_t test_svld1sh_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _u64, )(pg, bases);
}

svint32_t test_svld1sh_gather_s32offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_s32offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svld1sh_gather_s64offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_s64offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svld1sh_gather_s32offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_s32offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svld1sh_gather_s64offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_s64offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s64, offset_u64, )(pg, base, offsets);
}

svint32_t test_svld1sh_gather_u32offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_u32offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u32, offset_s32, )(pg, base, offsets);
}

svint64_t test_svld1sh_gather_u64offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_u64offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u64, offset_s64, )(pg, base, offsets);
}

svuint32_t test_svld1sh_gather_u32offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_u32offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u32, offset_u32, )(pg, base, offsets);
}

svuint64_t test_svld1sh_gather_u64offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svld1sh_gather_u64offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %offsets)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u64, offset_u64, )(pg, base, offsets);
}

svint32_t test_svld1sh_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

svint64_t test_svld1sh_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

svuint32_t test_svld1sh_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

svuint64_t test_svld1sh_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

svint32_t test_svld1sh_gather_s32index_s32(svbool_t pg, const int16_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_s32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s32, index_s32, )(pg, base, indices);
}

svint64_t test_svld1sh_gather_s64index_s64(svbool_t pg, const int16_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_s64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s64, index_s64, )(pg, base, indices);
}

svuint32_t test_svld1sh_gather_s32index_u32(svbool_t pg, const int16_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_s32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s32, index_u32, )(pg, base, indices);
}

svuint64_t test_svld1sh_gather_s64index_u64(svbool_t pg, const int16_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_s64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, s64, index_u64, )(pg, base, indices);
}

svint32_t test_svld1sh_gather_u32index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_u32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u32, index_s32, )(pg, base, indices);
}

svint64_t test_svld1sh_gather_u64index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_u64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u64, index_s64, )(pg, base, indices);
}

svuint32_t test_svld1sh_gather_u32index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_u32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base, <vscale x 4 x i32> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u32, index_u32, )(pg, base, indices);
}

svuint64_t test_svld1sh_gather_u64index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1sh_gather_u64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base, <vscale x 2 x i64> %indices)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather_, u64, index_u64, )(pg, base, indices);
}

svint32_t test_svld1sh_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_index_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _index_s32, )(pg, bases, index);
}

svint64_t test_svld1sh_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_index_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _index_s64, )(pg, bases, index);
}

svuint32_t test_svld1sh_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1sh_gather_u32base_index_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u32base, _index_u32, )(pg, bases, index);
}

svuint64_t test_svld1sh_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1sh_gather_u64base_index_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 1
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return SVE_ACLE_FUNC(svld1sh_gather, _u64base, _index_u64, )(pg, bases, index);
}