// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -target-feature +vsx -target-feature +altivec \
// RUN:   -target-cpu pwr10 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s

// RUN: %clang_cc1 -target-feature +vsx -target-feature +altivec \
// RUN:   -target-cpu pwr10 -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefix=CHECK-BE

// RUN: %clang_cc1 -target-feature +vsx -target-feature +altivec \
// RUN:   -target-cpu pwr10 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefix=CHECK-LE

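// The tests below verify that the Power10 vector built-ins lower to the
// expected LLVM intrinsics. Endian-dependent lowerings are checked with the
// CHECK-BE (big endian) and CHECK-LE (little endian) prefixes.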
#include <altivec.h>

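// Global operands shared by the test functions below.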
vector signed char vsca, vscb;
vector unsigned char vuca, vucb, vucc;
vector signed short vssa, vssb;
vector unsigned short vusa, vusb, vusc;
vector signed int vsia, vsib;
vector unsigned int vuia, vuib, vuic;
vector signed long long vslla, vsllb;
vector unsigned long long vulla, vullb, vullc;
vector unsigned __int128 vui128a, vui128b, vui128c;
vector float vfa, vfb;
vector double vda, vdb;
unsigned int uia, uib;
unsigned char uca;
unsigned short usa;
unsigned long long ulla;

vector unsigned long long test_vpdepd(void) {
  // CHECK: @llvm.ppc.altivec.vpdepd(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_pdep(vulla, vullb);
}

vector unsigned long long test_vpextd(void) {
  // CHECK: @llvm.ppc.altivec.vpextd(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_pext(vulla, vullb);
}

vector unsigned long long test_vcfuged(void) {
  // CHECK: @llvm.ppc.altivec.vcfuged(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cfuge(vulla, vullb);
}

unsigned long long test_vgnb_1(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 2)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 2);
}

unsigned long long test_vgnb_2(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 7)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 7);
}

unsigned long long test_vgnb_3(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 5)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 5);
}

vector unsigned char test_xxeval_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 0)
  // CHECK: ret <16 x i8>
  return vec_ternarylogic(vuca, vucb, vucc, 0);
}

vector unsigned short test_xxeval_us(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 255)
  // CHECK: ret <8 x i16>
  return vec_ternarylogic(vusa, vusb, vusc, 255);
}

vector unsigned int test_xxeval_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 150)
  // CHECK: ret <4 x i32>
  return vec_ternarylogic(vuia, vuib, vuic, 150);
}

vector unsigned long long test_xxeval_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 1)
  // CHECK: ret <2 x i64>
  return vec_ternarylogic(vulla, vullb, vullc, 1);
}

vector unsigned __int128 test_xxeval_ui128(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 246)
  // CHECK: ret <1 x i128>
  return vec_ternarylogic(vui128a, vui128b, vui128c, 246);
}

vector unsigned char test_xxgenpcvbm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_genpcvm(vuca, 0);
}

vector unsigned short test_xxgenpcvhm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %{{.+}}, i32
  // CHECK-NEXT: ret <8 x i16>
  return vec_genpcvm(vusa, 0);
}

vector unsigned int test_xxgenpcvwm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %{{.+}}, i32
  // CHECK-NEXT: ret <4 x i32>
  return vec_genpcvm(vuia, 0);
}

vector unsigned long long test_xxgenpcvdm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %{{.+}}, i32
  // CHECK-NEXT: ret <2 x i64>
  return vec_genpcvm(vulla, 0);
}

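// vec_clrl and vec_clrr operate in element order, so each lowers to vclrlb on
// one endianness and to vclrrb on the other; the CHECK-BE/CHECK-LE lines below
// verify the swap.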
vector signed char test_vec_vclrl_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clrl(vsca, uia);
}

vector unsigned char test_vec_clrl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clrl(vuca, uia);
}

vector signed char test_vec_vclrr_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clrr(vsca, uia);
}

vector unsigned char test_vec_clrr_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clrr(vuca, uia);
}

vector unsigned long long test_vclzdm(void) {
  // CHECK: @llvm.ppc.altivec.vclzdm(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cntlzm(vulla, vullb);
}

vector unsigned long long test_vctzdm(void) {
  // CHECK: @llvm.ppc.altivec.vctzdm(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cnttzm(vulla, vullb);
}

vector signed char test_vec_sldb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_sldb(vsca, vscb, 0);
}

vector unsigned char test_vec_sldb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_sldb(vuca, vucb, 1);
}

vector signed short test_vec_sldb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_sldb(vssa, vssb, 2);
}

vector unsigned short test_vec_sldb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_sldb(vusa, vusb, 3);
}

vector signed int test_vec_sldb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_sldb(vsia, vsib, 4);
}

vector unsigned int test_vec_sldb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_sldb(vuia, vuib, 5);
}

vector signed long long test_vec_sldb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_sldb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_sldb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_sldb(vulla, vullb, 7);
}

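// Only the low three bits of the vec_srdb shift count are encoded, so the
// counts of 8, 9 and 10 used below lower to immediates 0, 1 and 2 in the
// CHECK lines.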
vector signed char test_vec_srdb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vsca, vscb, 8);
}

vector unsigned char test_vec_srdb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vuca, vucb, 9);
}

vector signed short test_vec_srdb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vssa, vssb, 10);
}

vector unsigned short test_vec_srdb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vusa, vusb, 3);
}

vector signed int test_vec_srdb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vsia, vsib, 4);
}

vector unsigned int test_vec_srdb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vuia, vuib, 5);
}

vector signed long long test_vec_srdb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_srdb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vulla, vullb, 7);
}

vector signed char test_vec_permx_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vsca, vscb, vucc, 0);
}

vector unsigned char test_vec_permx_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vuca, vucb, vucc, 1);
}

vector signed short test_vec_permx_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vssa, vssb, vucc, 2);
}

vector unsigned short test_vec_permx_us(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vusa, vusb, vucc, 3);
}

vector signed int test_vec_permx_si(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vsia, vsib, vucc, 4);
}

vector unsigned int test_vec_permx_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vuia, vuib, vucc, 5);
}

vector signed long long test_vec_permx_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vslla, vsllb, vucc, 6);
}

vector unsigned long long test_vec_permx_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vulla, vullb, vucc, 7);
}

vector float test_vec_permx_f(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_permx(vfa, vfb, vucc, 0);
}

vector double test_vec_permx_d(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_permx(vda, vdb, vucc, 1);
}

vector signed char test_vec_blend_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vsca, vscb, vucc);
}

vector unsigned char test_vec_blend_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vuca, vucb, vucc);
}

vector signed short test_vec_blend_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vssa, vssb, vusc);
}

vector unsigned short test_vec_blend_us(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vusa, vusb, vusc);
}

vector signed int test_vec_blend_si(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vsia, vsib, vuic);
}

vector unsigned int test_vec_blend_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vuia, vuib, vuic);
}

vector signed long long test_vec_blend_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vslla, vsllb, vullc);
}

vector unsigned long long test_vec_blend_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vulla, vullb, vullc);
}

vector float test_vec_blend_f(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_blendv(vfa, vfb, vuic);
}

vector double test_vec_blend_d(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_blendv(vda, vdb, vullc);
}

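// vec_insertl and vec_inserth lower to the left-index or right-index insert
// instructions depending on target endianness; the CHECK-BE/CHECK-LE lines
// below verify each pairing.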
vector unsigned char test_vec_insertl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(uca, vuca, uia);
}

vector unsigned short test_vec_insertl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(usa, vusa, uia);
}

vector unsigned int test_vec_insertl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(uib, vuia, uia);
}

vector unsigned long long test_vec_insertl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_insertl(ulla, vulla, uia);
}

vector unsigned char test_vec_insertl_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i64 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i64 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(vuca, vucb, uia);
}

vector unsigned short test_vec_insertl_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i64 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i64 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(vusa, vusb, uia);
}

vector unsigned int test_vec_insertl_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i64 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i64 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(vuia, vuib, uia);
}

vector unsigned char test_vec_inserth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(uca, vuca, uia);
}

vector unsigned short test_vec_inserth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(usa, vusa, uia);
}

vector unsigned int test_vec_inserth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(uib, vuia, uia);
}

vector unsigned long long test_vec_inserth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_inserth(ulla, vulla, uia);
}

vector unsigned char test_vec_inserth_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i64 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i64 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(vuca, vucb, uia);
}

vector unsigned short test_vec_inserth_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i64 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i64 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(vusa, vusb, uia);
}

vector unsigned int test_vec_inserth_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i64 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i64 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(vuia, vuib, uia);
}

vector signed int test_vec_vec_splati_si(void) {
  // CHECK-BE: ret <4 x i32> <i32 -17, i32 -17, i32 -17, i32 -17>
  // CHECK: ret <4 x i32> <i32 -17, i32 -17, i32 -17, i32 -17>
  return vec_splati(-17);
}

vector unsigned int test_vec_vec_splati_ui(void) {
  // CHECK-BE: ret <4 x i32> <i32 16, i32 16, i32 16, i32 16>
  // CHECK: ret <4 x i32> <i32 16, i32 16, i32 16, i32 16>
  return vec_splati(16U);
}

vector float test_vec_vec_splati_f(void) {
  // CHECK-BE: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  // CHECK: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  return vec_splati(1.0f);
}

vector double test_vec_vec_splatid(void) {
  // CHECK-BE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-BE-NEXT: [[T2:%.+]] = insertelement <2 x double> undef, double [[T1:%.+]], i32 0
  // CHECK-BE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> undef, <2 x i32> zeroinitialize
  // CHECK-BE-NEXT: ret <2 x double> [[T3:%.+]]
  // CHECK: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-NEXT: [[T2:%.+]] = insertelement <2 x double> undef, double [[T1:%.+]], i32 0
  // CHECK-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> undef, <2 x i32> zeroinitialize
  // CHECK-NEXT: ret <2 x double> [[T3:%.+]]
  return vec_splatid(1.0);
}

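// vec_splati_ins writes the splat value into two elements; on big endian the
// indices are the given index and index + 2, while on little endian they are
// mirrored to 1 - index and 3 - index, as the CHECK lines below verify.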
vector signed int test_vec_vec_splati_ins_si(void) {
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK: ret <4 x i32>
  return vec_splati_ins(vsia, 0, -17);
}

vector unsigned int test_vec_vec_splati_ins_ui(void) {
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK: ret <4 x i32>
  return vec_splati_ins(vuia, 1, 16U);
}

vector float test_vec_vec_splati_ins_f(void) {
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x float>
  // CHECK: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T2]]
  // CHECK: ret <4 x float>
  return vec_splati_ins(vfa, 0, 1.0f);
}

int test_vec_test_lsbb_all_ones(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i1 true
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_ones(vuca);
}

int test_vec_test_lsbb_all_zeros(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i1 false
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_zeros(vuca);
}