// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null
7 #include <arm_sve.h>
8 
9 #ifdef SVE_OVERLOADED_FORMS
10 // A simple used,unused... macro, long enough to represent any SVE builtin.
11 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
12 #else
13 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
14 #endif
15 
test_svld1_s8(svbool_t pg,const int8_t * base)16 svint8_t test_svld1_s8(svbool_t pg, const int8_t *base)
17 {
18   // CHECK-LABEL: test_svld1_s8
19   // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
20   // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
21   return SVE_ACLE_FUNC(svld1,_s8,,)(pg, base);
22 }
23 
test_svld1_s16(svbool_t pg,const int16_t * base)24 svint16_t test_svld1_s16(svbool_t pg, const int16_t *base)
25 {
26   // CHECK-LABEL: test_svld1_s16
27   // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
28   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %base)
29   // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
30   return SVE_ACLE_FUNC(svld1,_s16,,)(pg, base);
31 }
32 
test_svld1_s32(svbool_t pg,const int32_t * base)33 svint32_t test_svld1_s32(svbool_t pg, const int32_t *base)
34 {
35   // CHECK-LABEL: test_svld1_s32
36   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
37   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base)
38   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
39   return SVE_ACLE_FUNC(svld1,_s32,,)(pg, base);
40 }
41 
test_svld1_s64(svbool_t pg,const int64_t * base)42 svint64_t test_svld1_s64(svbool_t pg, const int64_t *base)
43 {
44   // CHECK-LABEL: test_svld1_s64
45   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
46   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base)
47   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
48   return SVE_ACLE_FUNC(svld1,_s64,,)(pg, base);
49 }
50 
test_svld1_u8(svbool_t pg,const uint8_t * base)51 svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base)
52 {
53   // CHECK-LABEL: test_svld1_u8
54   // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
55   // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
56   return SVE_ACLE_FUNC(svld1,_u8,,)(pg, base);
57 }
58 
test_svld1_u16(svbool_t pg,const uint16_t * base)59 svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base)
60 {
61   // CHECK-LABEL: test_svld1_u16
62   // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
63   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %base)
64   // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
65   return SVE_ACLE_FUNC(svld1,_u16,,)(pg, base);
66 }
67 
test_svld1_u32(svbool_t pg,const uint32_t * base)68 svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base)
69 {
70   // CHECK-LABEL: test_svld1_u32
71   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
72   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base)
73   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
74   return SVE_ACLE_FUNC(svld1,_u32,,)(pg, base);
75 }
76 
test_svld1_u64(svbool_t pg,const uint64_t * base)77 svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base)
78 {
79   // CHECK-LABEL: test_svld1_u64
80   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
81   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base)
82   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
83   return SVE_ACLE_FUNC(svld1,_u64,,)(pg, base);
84 }
85 
test_svld1_f16(svbool_t pg,const float16_t * base)86 svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base)
87 {
88   // CHECK-LABEL: test_svld1_f16
89   // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
90   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %[[PG]], half* %base)
91   // CHECK: ret <vscale x 8 x half> %[[LOAD]]
92   return SVE_ACLE_FUNC(svld1,_f16,,)(pg, base);
93 }
94 
test_svld1_f32(svbool_t pg,const float32_t * base)95 svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base)
96 {
97   // CHECK-LABEL: test_svld1_f32
98   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
99   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base)
100   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
101   return SVE_ACLE_FUNC(svld1,_f32,,)(pg, base);
102 }
103 
test_svld1_f64(svbool_t pg,const float64_t * base)104 svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base)
105 {
106   // CHECK-LABEL: test_svld1_f64
107   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
108   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base)
109   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
110   return SVE_ACLE_FUNC(svld1,_f64,,)(pg, base);
111 }
112 
test_svld1_vnum_s8(svbool_t pg,const int8_t * base,int64_t vnum)113 svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
114 {
115   // CHECK-LABEL: test_svld1_vnum_s8
116   // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
117   // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0
118   // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %[[GEP]])
119   // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
120   return SVE_ACLE_FUNC(svld1_vnum,_s8,,)(pg, base, vnum);
121 }
122 
test_svld1_vnum_s16(svbool_t pg,const int16_t * base,int64_t vnum)123 svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
124 {
125   // CHECK-LABEL: test_svld1_vnum_s16
126   // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
127   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
128   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0
129   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %[[GEP]])
130   // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
131   return SVE_ACLE_FUNC(svld1_vnum,_s16,,)(pg, base, vnum);
132 }
133 
test_svld1_vnum_s32(svbool_t pg,const int32_t * base,int64_t vnum)134 svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
135 {
136   // CHECK-LABEL: test_svld1_vnum_s32
137   // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
138   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
139   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0
140   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %[[GEP]])
141   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
142   return SVE_ACLE_FUNC(svld1_vnum,_s32,,)(pg, base, vnum);
143 }
144 
test_svld1_vnum_s64(svbool_t pg,const int64_t * base,int64_t vnum)145 svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
146 {
147   // CHECK-LABEL: test_svld1_vnum_s64
148   // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
149   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
150   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0
151   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %[[GEP]])
152   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
153   return SVE_ACLE_FUNC(svld1_vnum,_s64,,)(pg, base, vnum);
154 }
155 
test_svld1_vnum_u8(svbool_t pg,const uint8_t * base,int64_t vnum)156 svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
157 {
158   // CHECK-LABEL: test_svld1_vnum_u8
159   // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
160   // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0
161   // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %[[GEP]])
162   // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
163   return SVE_ACLE_FUNC(svld1_vnum,_u8,,)(pg, base, vnum);
164 }
165 
test_svld1_vnum_u16(svbool_t pg,const uint16_t * base,int64_t vnum)166 svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
167 {
168   // CHECK-LABEL: test_svld1_vnum_u16
169   // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
170   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
171   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0
172   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %[[GEP]])
173   // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
174   return SVE_ACLE_FUNC(svld1_vnum,_u16,,)(pg, base, vnum);
175 }
176 
test_svld1_vnum_u32(svbool_t pg,const uint32_t * base,int64_t vnum)177 svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
178 {
179   // CHECK-LABEL: test_svld1_vnum_u32
180   // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
181   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
182   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0
183   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %[[GEP]])
184   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
185   return SVE_ACLE_FUNC(svld1_vnum,_u32,,)(pg, base, vnum);
186 }
187 
test_svld1_vnum_u64(svbool_t pg,const uint64_t * base,int64_t vnum)188 svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
189 {
190   // CHECK-LABEL: test_svld1_vnum_u64
191   // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
192   // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
193   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0
194   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %[[GEP]])
195   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
196   return SVE_ACLE_FUNC(svld1_vnum,_u64,,)(pg, base, vnum);
197 }
198 
test_svld1_vnum_f16(svbool_t pg,const float16_t * base,int64_t vnum)199 svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum)
200 {
201   // CHECK-LABEL: test_svld1_vnum_f16
202   // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
203   // CHECK-DAG: %[[BITCAST:.*]] = bitcast half* %base to <vscale x 8 x half>*
204   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %[[BITCAST]], i64 %vnum, i64 0
205   // CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %[[PG]], half* %[[GEP]])
206   // CHECK: ret <vscale x 8 x half> %[[LOAD]]
207   return SVE_ACLE_FUNC(svld1_vnum,_f16,,)(pg, base, vnum);
208 }
209 
test_svld1_vnum_f32(svbool_t pg,const float32_t * base,int64_t vnum)210 svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum)
211 {
212   // CHECK-LABEL: test_svld1_vnum_f32
213   // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
214   // CHECK-DAG: %[[BITCAST:.*]] = bitcast float* %base to <vscale x 4 x float>*
215   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %[[BITCAST]], i64 %vnum, i64 0
216   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %[[GEP]])
217   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
218   return SVE_ACLE_FUNC(svld1_vnum,_f32,,)(pg, base, vnum);
219 }
220 
test_svld1_vnum_f64(svbool_t pg,const float64_t * base,int64_t vnum)221 svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum)
222 {
223   // CHECK-LABEL: test_svld1_vnum_f64
224   // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
225   // CHECK-DAG: %[[BITCAST:.*]] = bitcast double* %base to <vscale x 2 x double>*
226   // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %[[BITCAST]], i64 %vnum, i64 0
227   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %[[GEP]])
228   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
229   return SVE_ACLE_FUNC(svld1_vnum,_f64,,)(pg, base, vnum);
230 }
231 
test_svld1_gather_u32base_s32(svbool_t pg,svuint32_t bases)232 svint32_t test_svld1_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
233   // CHECK-LABEL: test_svld1_gather_u32base_s32
234   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
235   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
236   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
237   return SVE_ACLE_FUNC(svld1_gather, _u32base, _s32, )(pg, bases);
238 }
239 
test_svld1_gather_u64base_s64(svbool_t pg,svuint64_t bases)240 svint64_t test_svld1_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
241   // CHECK-LABEL: test_svld1_gather_u64base_s64
242   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
243   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
244   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
245   return SVE_ACLE_FUNC(svld1_gather, _u64base, _s64, )(pg, bases);
246 }
247 
// Unsigned variant of the 32-bit vector-base gather: identical codegen.
svuint32_t test_svld1_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  // CHECK-LABEL: test_svld1_gather_u32base_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u32base, _u32, )(pg, bases);
}

// Unsigned variant of the 64-bit vector-base gather: identical codegen.
svuint64_t test_svld1_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  // CHECK-LABEL: test_svld1_gather_u64base_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u64base, _u64, )(pg, bases);
}

test_svld1_gather_u32base_f32(svbool_t pg,svuint32_t bases)264 svfloat32_t test_svld1_gather_u32base_f32(svbool_t pg, svuint32_t bases) {
265   // CHECK-LABEL: test_svld1_gather_u32base_f32
266   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
267   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 0)
268   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
269   return SVE_ACLE_FUNC(svld1_gather, _u32base, _f32, )(pg, bases);
270 }
271 
test_svld1_gather_u64base_f64(svbool_t pg,svuint64_t bases)272 svfloat64_t test_svld1_gather_u64base_f64(svbool_t pg, svuint64_t bases) {
273   // CHECK-LABEL: test_svld1_gather_u64base_f64
274   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
275   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 0)
276   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
277   return SVE_ACLE_FUNC(svld1_gather, _u64base, _f64, )(pg, bases);
278 }
279 
// Scalar-base gather, signed 32-bit byte offsets (sxtw-extended intrinsic).
svint32_t test_svld1_gather_s32offset_s32(svbool_t pg, const int32_t *base, svint32_t offsets) {
  // CHECK-LABEL: test_svld1_gather_s32offset_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %offsets)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s32, offset, _s32)(pg, base, offsets);
}

// Scalar-base gather, 64-bit byte offsets (no extension needed).
svint64_t test_svld1_gather_s64offset_s64(svbool_t pg, const int64_t *base, svint64_t offsets) {
  // CHECK-LABEL: test_svld1_gather_s64offset_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %offsets)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s64, offset, _s64)(pg, base, offsets);
}

test_svld1_gather_s32offset_u32(svbool_t pg,const uint32_t * base,svint32_t offsets)296 svuint32_t test_svld1_gather_s32offset_u32(svbool_t pg, const uint32_t *base, svint32_t offsets) {
297   // CHECK-LABEL: test_svld1_gather_s32offset_u32
298   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
299   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %offsets)
300   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
301   return SVE_ACLE_FUNC(svld1_gather_, s32, offset, _u32)(pg, base, offsets);
302 }
303 
test_svld1_gather_s64offset_u64(svbool_t pg,const uint64_t * base,svint64_t offsets)304 svuint64_t test_svld1_gather_s64offset_u64(svbool_t pg, const uint64_t *base, svint64_t offsets) {
305   // CHECK-LABEL: test_svld1_gather_s64offset_u64
306   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
307   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %offsets)
308   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
309   return SVE_ACLE_FUNC(svld1_gather_, s64, offset, _u64)(pg, base, offsets);
310 }
311 
test_svld1_gather_s32offset_f32(svbool_t pg,const float32_t * base,svint32_t offsets)312 svfloat32_t test_svld1_gather_s32offset_f32(svbool_t pg, const float32_t *base, svint32_t offsets) {
313   // CHECK-LABEL: test_svld1_gather_s32offset_f32
314   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
315   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base, <vscale x 4 x i32> %offsets)
316   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
317   return SVE_ACLE_FUNC(svld1_gather_, s32, offset, _f32)(pg, base, offsets);
318 }
319 
test_svld1_gather_s64offset_f64(svbool_t pg,const float64_t * base,svint64_t offsets)320 svfloat64_t test_svld1_gather_s64offset_f64(svbool_t pg, const float64_t *base, svint64_t offsets) {
321   // CHECK-LABEL: test_svld1_gather_s64offset_f64
322   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
323   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base, <vscale x 2 x i64> %offsets)
324   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
325   return SVE_ACLE_FUNC(svld1_gather_, s64, offset, _f64)(pg, base, offsets);
326 }
327 
test_svld1_gather_u32offset_s32(svbool_t pg,const int32_t * base,svuint32_t offsets)328 svint32_t test_svld1_gather_u32offset_s32(svbool_t pg, const int32_t *base, svuint32_t offsets) {
329   // CHECK-LABEL: test_svld1_gather_u32offset_s32
330   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
331   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %offsets)
332   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
333   return SVE_ACLE_FUNC(svld1_gather_, u32, offset, _s32)(pg, base, offsets);
334 }
335 
test_svld1_gather_u64offset_s64(svbool_t pg,const int64_t * base,svuint64_t offsets)336 svint64_t test_svld1_gather_u64offset_s64(svbool_t pg, const int64_t *base, svuint64_t offsets) {
337   // CHECK-LABEL: test_svld1_gather_u64offset_s64
338   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
339   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %offsets)
340   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
341   return SVE_ACLE_FUNC(svld1_gather_, u64, offset, _s64)(pg, base, offsets);
342 }
343 
// Unsigned result, unsigned 32-bit offsets: uxtw gather intrinsic.
svuint32_t test_svld1_gather_u32offset_u32(svbool_t pg, const uint32_t *base, svuint32_t offsets) {
  // CHECK-LABEL: test_svld1_gather_u32offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %offsets)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u32, offset, _u32)(pg, base, offsets);
}

// Unsigned result, unsigned 64-bit offsets: plain gather intrinsic.
svuint64_t test_svld1_gather_u64offset_u64(svbool_t pg, const uint64_t *base, svuint64_t offsets) {
  // CHECK-LABEL: test_svld1_gather_u64offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %offsets)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u64, offset, _u64)(pg, base, offsets);
}

test_svld1_gather_u32offset_f32(svbool_t pg,const float32_t * base,svuint32_t offsets)360 svfloat32_t test_svld1_gather_u32offset_f32(svbool_t pg, const float32_t *base, svuint32_t offsets) {
361   // CHECK-LABEL: test_svld1_gather_u32offset_f32
362   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
363   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base, <vscale x 4 x i32> %offsets)
364   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
365   return SVE_ACLE_FUNC(svld1_gather_, u32, offset, _f32)(pg, base, offsets);
366 }
367 
test_svld1_gather_u64offset_f64(svbool_t pg,const float64_t * base,svuint64_t offsets)368 svfloat64_t test_svld1_gather_u64offset_f64(svbool_t pg, const float64_t *base, svuint64_t offsets) {
369   // CHECK-LABEL: test_svld1_gather_u64offset_f64
370   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
371   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base, <vscale x 2 x i64> %offsets)
372   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
373   return SVE_ACLE_FUNC(svld1_gather_, u64, offset, _f64)(pg, base, offsets);
374 }
375 
test_svld1_gather_u32base_offset_s32(svbool_t pg,svuint32_t bases,int64_t offset)376 svint32_t test_svld1_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
377   // CHECK-LABEL: test_svld1_gather_u32base_offset_s32
378   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
379   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
380   // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
381   return SVE_ACLE_FUNC(svld1_gather, _u32base, _offset_s32, )(pg, bases, offset);
382 }
383 
test_svld1_gather_u64base_offset_s64(svbool_t pg,svuint64_t bases,int64_t offset)384 svint64_t test_svld1_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
385   // CHECK-LABEL: test_svld1_gather_u64base_offset_s64
386   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
387   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
388   // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
389   return SVE_ACLE_FUNC(svld1_gather, _u64base, _offset_s64, )(pg, bases, offset);
390 }
391 
// Unsigned variant of the 32-bit base+offset gather: identical codegen.
svuint32_t test_svld1_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1_gather_u32base_offset_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

// Unsigned variant of the 64-bit base+offset gather: identical codegen.
svuint64_t test_svld1_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  // CHECK-LABEL: test_svld1_gather_u64base_offset_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

test_svld1_gather_u32base_offset_f32(svbool_t pg,svuint32_t bases,int64_t offset)408 svfloat32_t test_svld1_gather_u32base_offset_f32(svbool_t pg, svuint32_t bases, int64_t offset) {
409   // CHECK-LABEL: test_svld1_gather_u32base_offset_f32
410   // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
411   // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %offset)
412   // CHECK: ret <vscale x 4 x float> %[[LOAD]]
413   return SVE_ACLE_FUNC(svld1_gather, _u32base, _offset_f32, )(pg, bases, offset);
414 }
415 
test_svld1_gather_u64base_offset_f64(svbool_t pg,svuint64_t bases,int64_t offset)416 svfloat64_t test_svld1_gather_u64base_offset_f64(svbool_t pg, svuint64_t bases, int64_t offset) {
417   // CHECK-LABEL: test_svld1_gather_u64base_offset_f64
418   // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
419   // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %offset)
420   // CHECK: ret <vscale x 2 x double> %[[LOAD]]
421   return SVE_ACLE_FUNC(svld1_gather, _u64base, _offset_f64, )(pg, bases, offset);
422 }
423 
// Scalar-base gather with element-sized indices: sxtw.index (scaled) intrinsic.
svint32_t test_svld1_gather_s32index_s32(svbool_t pg, const int32_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_s32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s32, index, _s32)(pg, base, indices);
}

// Scalar-base gather with 64-bit element indices: gather.index (scaled) intrinsic.
svint64_t test_svld1_gather_s64index_s64(svbool_t pg, const int64_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_s64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s64, index, _s64)(pg, base, indices);
}

// Same lowering as the s32-element case: element signedness does not change
// the IR, so u32 loads with signed indices also use the sxtw.index intrinsic.
svuint32_t test_svld1_gather_s32index_u32(svbool_t pg, const uint32_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_s32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s32, index, _u32)(pg, base, indices);
}
447 
// u64 elements with signed 64-bit indices: identical IR to the s64-element
// form (plain scaled-index gather, no extension needed).
svuint64_t test_svld1_gather_s64index_u64(svbool_t pg, const uint64_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_s64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s64, index, _u64)(pg, base, indices);
}
455 
// f32 elements with signed 32-bit indices: same sxtw.index lowering, with the
// float result type reflected in the intrinsic suffix (nxv4f32).
svfloat32_t test_svld1_gather_s32index_f32(svbool_t pg, const float32_t *base, svint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_s32index_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s32, index, _f32)(pg, base, indices);
}
463 
// f64 elements with signed 64-bit indices: plain scaled-index gather with a
// double result type (nxv2f64).
svfloat64_t test_svld1_gather_s64index_f64(svbool_t pg, const float64_t *base, svint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_s64index_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, s64, index, _f64)(pg, base, indices);
}
471 
// Unsigned 32-bit vector indices select the zero-extending (uxtw) variant of
// the scaled-index gather intrinsic, unlike the sxtw form used for s32 indices.
svint32_t test_svld1_gather_u32index_s32(svbool_t pg, const int32_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_u32index_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u32, index, _s32)(pg, base, indices);
}
479 
// Unsigned 64-bit indices: no extension is required for 64-bit indices, so the
// lowering matches the s64-index case (plain scaled-index gather intrinsic).
svint64_t test_svld1_gather_u64index_s64(svbool_t pg, const int64_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_u64index_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u64, index, _s64)(pg, base, indices);
}
487 
// u32 elements, unsigned 32-bit indices: uxtw.index gather, same IR as the
// s32-element/u32-index variant.
svuint32_t test_svld1_gather_u32index_u32(svbool_t pg, const uint32_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_u32index_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u32, index, _u32)(pg, base, indices);
}
495 
// u64 elements, unsigned 64-bit indices: plain scaled-index gather intrinsic
// with a 2-lane predicate.
svuint64_t test_svld1_gather_u64index_u64(svbool_t pg, const uint64_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_u64index_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u64, index, _u64)(pg, base, indices);
}
503 
// f32 elements, unsigned 32-bit indices: zero-extending uxtw.index gather with
// a float result type (nxv4f32).
svfloat32_t test_svld1_gather_u32index_f32(svbool_t pg, const float32_t *base, svuint32_t indices) {
  // CHECK-LABEL: test_svld1_gather_u32index_f32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base, <vscale x 4 x i32> %indices)
  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u32, index, _f32)(pg, base, indices);
}
511 
// f64 elements, unsigned 64-bit indices: plain scaled-index gather with a
// double result type (nxv2f64).
svfloat64_t test_svld1_gather_u64index_f64(svbool_t pg, const float64_t *base, svuint64_t indices) {
  // CHECK-LABEL: test_svld1_gather_u64index_f64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base, <vscale x 2 x i64> %indices)
  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather_, u64, index, _f64)(pg, base, indices);
}
519 
// Vector-of-bases form with a scalar index: the index is scaled to a byte
// offset (shl by 2 for 4-byte elements) and the call lowers to the
// scalar.offset gather intrinsic. CHECK-DAG is used because the predicate
// conversion and the shift may be emitted in either order.
svint32_t test_svld1_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u32base_index_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 2
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u32base, _index_s32, )(pg, bases, index);
}
528 
// 64-bit vector-of-bases with scalar index: index scaled by 8 (shl by 3) and
// lowered to the scalar.offset gather intrinsic.
svint64_t test_svld1_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u64base_index_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 3
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u64base, _index_s64, )(pg, bases, index);
}
537 
// u32 elements from 32-bit vector bases plus scalar index: same lowering as
// the s32-element form (shl by 2, scalar.offset gather).
svuint32_t test_svld1_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u32base_index_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 2
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u32base, _index_u32, )(pg, bases, index);
}
546 
// u64 elements from 64-bit vector bases plus scalar index: shl by 3 then
// scalar.offset gather, identical IR to the s64-element form.
svuint64_t test_svld1_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u64base_index_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 3
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u64base, _index_u64, )(pg, bases, index);
}
555 
// f32 elements from 32-bit vector bases plus scalar index: shl by 2 then the
// scalar.offset gather intrinsic with a float result type (nxv4f32).
svfloat32_t test_svld1_gather_u32base_index_f32(svbool_t pg, svuint32_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u32base_index_f32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 2
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u32base, _index_f32, )(pg, bases, index);
}
564 
// f64 elements from 64-bit vector bases plus scalar index: shl by 3 then the
// scalar.offset gather intrinsic with a double result type (nxv2f64).
svfloat64_t test_svld1_gather_u64base_index_f64(svbool_t pg, svuint64_t bases, int64_t index) {
  // CHECK-LABEL: test_svld1_gather_u64base_index_f64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SHL:.*]] = shl i64 %index, 3
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %bases, i64 %[[SHL]])
  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
  return SVE_ACLE_FUNC(svld1_gather, _u64base, _index_f64, )(pg, bases, index);
}
573