// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null
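// The last RUN line above compiles all the way to assembly with the output
// discarded; presumably it is there to verify that these builtins also make
// it through the backend under -Werror without any diagnostics.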
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

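// For example, SVE_ACLE_FUNC(svdupq,_n,_s8,) expands to the fully mangled
// name svdupq_n_s8 by default, and to the overloaded name svdupq_s8 when
// SVE_OVERLOADED_FORMS is defined, so each test exercises both spellings of
// the same ACLE builtin.
//
// svdupq_lane duplicates the 128-bit quadword of 'data' selected by 'index'
// across the whole vector, so each of the lane tests below reduces to a
// single call to the corresponding llvm.aarch64.sve.dupq.lane intrinsic.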
svint8_t test_svdupq_lane_s8(svint8_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_s8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_s8,,)(data, index);
}

svint16_t test_svdupq_lane_s16(svint16_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_s16,,)(data, index);
}

svint32_t test_svdupq_lane_s32(svint32_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_s32,,)(data, index);
}

svint64_t test_svdupq_lane_s64(svint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_s64,,)(data, index);
}

svuint8_t test_svdupq_lane_u8(svuint8_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_u8
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %data, i64 %index)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_u8,,)(data, index);
}

svuint16_t test_svdupq_lane_u16(svuint16_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %data, i64 %index)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_u16,,)(data, index);
}

svuint32_t test_svdupq_lane_u32(svuint32_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %data, i64 %index)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_u32,,)(data, index);
}

svuint64_t test_svdupq_lane_u64(svuint64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %data, i64 %index)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_u64,,)(data, index);
}

svfloat16_t test_svdupq_lane_f16(svfloat16_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_f16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %data, i64 %index)
  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_f16,,)(data, index);
}

svfloat32_t test_svdupq_lane_f32(svfloat32_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_f32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %data, i64 %index)
  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_f32,,)(data, index);
}

svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
{
  // CHECK-LABEL: test_svdupq_lane_f64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %data, i64 %index)
  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(svdupq_lane,_f64,,)(data, index);
}

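// The svdupq_n forms below assemble their scalar arguments into a 128-bit
// fixed-length vector with a chain of insertelement instructions, widen that
// into a scalable vector via llvm.experimental.vector.insert, and then
// broadcast it across the whole register with llvm.aarch64.sve.dupq.lane at
// index 0.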
svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
                          int8_t x4, int8_t x5, int8_t x6, int8_t x7,
                          int8_t x8, int8_t x9, int8_t x10, int8_t x11,
                          int8_t x12, int8_t x13, int8_t x14, int8_t x15)
{
  // CHECK-LABEL: test_svdupq_n_s8
  // CHECK: insertelement <16 x i8> undef, i8 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <16 x i8> %[[X:.*]], i8 %x15, i32 15
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %[[INS]], i64 0)
  // CHECK: ret <vscale x 16 x i8> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_s8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}

svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
                            int16_t x4, int16_t x5, int16_t x6, int16_t x7)
{
  // CHECK-LABEL: test_svdupq_n_s16
  // CHECK: insertelement <8 x i16> undef, i16 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <8 x i16> %[[X:.*]], i16 %x7, i32 7
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %[[INS]], i64 0)
  // CHECK: ret <vscale x 8 x i16> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_s16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
{
  // CHECK-LABEL: test_svdupq_n_s32
  // CHECK: insertelement <4 x i32> undef, i32 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <4 x i32> %[[X:.*]], i32 %x3, i32 3
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %[[INS]], i64 0)
  // CHECK: ret <vscale x 4 x i32> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_s32,)(x0, x1, x2, x3);
}

svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
{
  // CHECK-LABEL: test_svdupq_n_s64
  // CHECK: %[[SVEC:.*]] = insertelement <2 x i64> undef, i64 %x0, i32 0
  // CHECK: %[[VEC:.*]] = insertelement <2 x i64> %[[SVEC]], i64 %x1, i32 1
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %[[INS]], i64 0)
  // CHECK: ret <vscale x 2 x i64> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
}

svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
                           uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7,
                           uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11,
                           uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15)
{
  // CHECK-LABEL: test_svdupq_n_u8
  // CHECK: insertelement <16 x i8> undef, i8 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <16 x i8> %[[X:.*]], i8 %x15, i32 15
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %[[INS]], i64 0)
  // CHECK: ret <vscale x 16 x i8> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_u8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}

svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
                             uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7)
{
  // CHECK-LABEL: test_svdupq_n_u16
  // CHECK: insertelement <8 x i16> undef, i16 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <8 x i16> %[[X:.*]], i16 %x7, i32 7
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %[[INS]], i64 0)
  // CHECK: ret <vscale x 8 x i16> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_u16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
{
  // CHECK-LABEL: test_svdupq_n_u32
  // CHECK: insertelement <4 x i32> undef, i32 %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <4 x i32> %[[X:.*]], i32 %x3, i32 3
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %[[INS]], i64 0)
  // CHECK: ret <vscale x 4 x i32> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_u32,)(x0, x1, x2, x3);
}

svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
{
  // CHECK-LABEL: test_svdupq_n_u64
  // CHECK: %[[SVEC:.*]] = insertelement <2 x i64> undef, i64 %x0, i32 0
  // CHECK: %[[VEC:.*]] = insertelement <2 x i64> %[[SVEC]], i64 %x1, i32 1
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %[[INS]], i64 0)
  // CHECK: ret <vscale x 2 x i64> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_u64,)(x0, x1);
}

svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3,
                              float16_t x4, float16_t x5, float16_t x6, float16_t x7)
{
  // CHECK-LABEL: test_svdupq_n_f16
  // CHECK: insertelement <8 x half> undef, half %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <8 x half> %[[X:.*]], half %x7, i32 7
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %[[INS]], i64 0)
  // CHECK: ret <vscale x 8 x half> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_f16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3)
{
  // CHECK-LABEL: test_svdupq_n_f32
  // CHECK: insertelement <4 x float> undef, float %x0, i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <4 x float> %[[X:.*]], float %x3, i32 3
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %[[INS]], i64 0)
  // CHECK: ret <vscale x 4 x float> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_f32,)(x0, x1, x2, x3);
}

svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
{
  // CHECK-LABEL: test_svdupq_n_f64
  // CHECK: %[[SVEC:.*]] = insertelement <2 x double> undef, double %x0, i32 0
  // CHECK: %[[VEC:.*]] = insertelement <2 x double> %[[SVEC]], double %x1, i32 1
  // CHECK-NOT: insertelement
  // CHECK: %[[INS:.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %[[INS]], i64 0)
  // CHECK: ret <vscale x 2 x double> %[[DUPQ]]
  return SVE_ACLE_FUNC(svdupq,_n,_f64,)(x0, x1);
}

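// The predicate (svbool_t) forms below additionally zero-extend each bool
// argument before packing, then materialize the predicate by comparing the
// broadcast vector against zero (cmpne / cmpne.wide under an all-elements
// ptrue), converting any narrower predicate type back to <vscale x 16 x i1>.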
svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
                          bool x4, bool x5, bool x6, bool x7,
                          bool x8, bool x9, bool x10, bool x11,
                          bool x12, bool x13, bool x14, bool x15)
{
  // CHECK-LABEL: test_svdupq_n_b8
  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i8
  // CHECK-DAG: %[[X15:.*]] = zext i1 %x15 to i8
  // CHECK: insertelement <16 x i8> undef, i8 %[[X0]], i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <16 x i8> %[[X:.*]], i8 %[[X15]], i32 15
  // CHECK-NOT: insertelement
  // CHECK: %[[PTRUE:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  // CHECK: %[[INS:.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %[[INS]], i64 0)
  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  // CHECK: %[[CMP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %[[PTRUE]], <vscale x 16 x i8> %[[DUPQ]], <vscale x 2 x i64> %[[ZERO]])
  // CHECK: ret <vscale x 16 x i1> %[[CMP]]
  return SVE_ACLE_FUNC(svdupq,_n,_b8,)(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
}

svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
                           bool x4, bool x5, bool x6, bool x7)
{
  // CHECK-LABEL: test_svdupq_n_b16
  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i16
  // CHECK-DAG: %[[X7:.*]] = zext i1 %x7 to i16
  // CHECK: insertelement <8 x i16> undef, i16 %[[X0]], i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <8 x i16> %[[X:.*]], i16 %[[X7]], i32 7
  // CHECK-NOT: insertelement
  // CHECK: %[[PTRUE:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  // CHECK: %[[INS:.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %[[INS]], i64 0)
  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  // CHECK: %[[CMP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %[[PTRUE]], <vscale x 8 x i16> %[[DUPQ]], <vscale x 2 x i64> %[[ZERO]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[CMP]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return SVE_ACLE_FUNC(svdupq,_n,_b16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
{
  // CHECK-LABEL: test_svdupq_n_b32
  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i32
  // CHECK-DAG: %[[X3:.*]] = zext i1 %x3 to i32
  // CHECK: insertelement <4 x i32> undef, i32 %[[X0]], i32 0
  // <assume other insertelement>
  // CHECK: %[[VEC:.*]] = insertelement <4 x i32> %[[X:.*]], i32 %[[X3]], i32 3
  // CHECK-NOT: insertelement
  // CHECK: %[[PTRUE:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  // CHECK: %[[INS:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %[[INS]], i64 0)
  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  // CHECK: %[[CMP:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %[[PTRUE]], <vscale x 4 x i32> %[[DUPQ]], <vscale x 2 x i64> %[[ZERO]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[CMP]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return SVE_ACLE_FUNC(svdupq,_n,_b32,)(x0, x1, x2, x3);
}

svbool_t test_svdupq_n_b64(bool x0, bool x1)
{
  // CHECK-LABEL: test_svdupq_n_b64
  // CHECK-DAG: %[[X0:.*]] = zext i1 %x0 to i64
  // CHECK-DAG: %[[X1:.*]] = zext i1 %x1 to i64
  // CHECK: %[[SVEC:.*]] = insertelement <2 x i64> undef, i64 %[[X0]], i32 0
  // CHECK: %[[VEC:.*]] = insertelement <2 x i64> %[[SVEC]], i64 %[[X1]], i32 1
  // CHECK-NOT: insertelement
  // CHECK: %[[PTRUE:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  // CHECK: %[[INS:.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %[[VEC]], i64 0)
  // CHECK: %[[DUPQ:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %[[INS]], i64 0)
  // CHECK: %[[ZERO:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
  // CHECK: %[[CMP:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %[[PTRUE]], <vscale x 2 x i64> %[[DUPQ]], <vscale x 2 x i64> %[[ZERO]])
  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[CMP]])
  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
  return SVE_ACLE_FUNC(svdupq,_n,_b64,)(x0, x1);
}