// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -target-feature +vsx \
// RUN: -target-cpu pwr10 -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s -check-prefixes=CHECK-BE,CHECK
// RUN: %clang_cc1 -target-feature +vsx \
// RUN: -target-cpu pwr10 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN: -o - | FileCheck %s -check-prefixes=CHECK-LE,CHECK
8
9 #include <altivec.h>
10
11 vector signed __int128 vi128a;
12 vector signed char vsca, vscb;
13 vector unsigned char vuca, vucb, vucc;
14 vector signed short vssa, vssb;
15 vector unsigned short vusa, vusb, vusc;
16 vector signed int vsia, vsib;
17 vector unsigned int vuia, vuib, vuic;
18 vector signed long long vslla, vsllb;
19 vector unsigned long long vulla, vullb, vullc;
20 vector signed __int128 vsi128a, vsi128b, vsi128c;
21 vector unsigned __int128 vui128a, vui128b, vui128c;
22 vector float vfa, vfb;
23 vector double vda, vdb;
24 float fa;
25 double da;
26 signed int sia;
27 signed int *iap;
28 unsigned int uia, uib, *uiap;
29 signed char *cap;
30 unsigned char uca;
31 const unsigned char *ucap;
32 const signed short *sap;
33 unsigned short usa;
34 const unsigned short *usap;
35 const signed long long *llap;
36 signed long long llb;
37 unsigned long long ulla;
38 const unsigned long long *ullap;
39
test_vec_mul_sll(void)40 vector signed long long test_vec_mul_sll(void) {
41 // CHECK: mul <2 x i64>
42 // CHECK-NEXT: ret <2 x i64>
43 return vec_mul(vslla, vsllb);
44 }
45
test_vec_mul_ull(void)46 vector unsigned long long test_vec_mul_ull(void) {
47 // CHECK: mul <2 x i64>
48 // CHECK-NEXT: ret <2 x i64>
49 return vec_mul(vulla, vullb);
50 }
51
test_vec_div_si(void)52 vector signed int test_vec_div_si(void) {
53 // CHECK: sdiv <4 x i32>
54 // CHECK-NEXT: ret <4 x i32>
55 return vec_div(vsia, vsib);
56 }
57
test_vec_div_ui(void)58 vector unsigned int test_vec_div_ui(void) {
59 // CHECK: udiv <4 x i32>
60 // CHECK-NEXT: ret <4 x i32>
61 return vec_div(vuia, vuib);
62 }
63
test_vec_div_sll(void)64 vector signed long long test_vec_div_sll(void) {
65 // CHECK: sdiv <2 x i64>
66 // CHECK-NEXT: ret <2 x i64>
67 return vec_div(vslla, vsllb);
68 }
69
test_vec_div_ull(void)70 vector unsigned long long test_vec_div_ull(void) {
71 // CHECK: udiv <2 x i64>
72 // CHECK-NEXT: ret <2 x i64>
73 return vec_div(vulla, vullb);
74 }
75
test_vec_div_u128(void)76 vector unsigned __int128 test_vec_div_u128(void) {
77 // CHECK: udiv <1 x i128>
78 // CHECK-NEXT: ret <1 x i128>
79 return vec_div(vui128a, vui128b);
80 }
81
test_vec_div_s128(void)82 vector signed __int128 test_vec_div_s128(void) {
83 // CHECK: sdiv <1 x i128>
84 // CHECK-NEXT: ret <1 x i128>
85 return vec_div(vsi128a, vsi128b);
86 }
87
test_vec_dive_si(void)88 vector signed int test_vec_dive_si(void) {
89 // CHECK: @llvm.ppc.altivec.vdivesw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
90 // CHECK-NEXT: ret <4 x i32>
91 return vec_dive(vsia, vsib);
92 }
93
test_vec_dive_ui(void)94 vector unsigned int test_vec_dive_ui(void) {
95 // CHECK: @llvm.ppc.altivec.vdiveuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
96 // CHECK-NEXT: ret <4 x i32>
97 return vec_dive(vuia, vuib);
98 }
99
test_vec_dive_sll(void)100 vector signed long long test_vec_dive_sll(void) {
101 // CHECK: @llvm.ppc.altivec.vdivesd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
102 // CHECK-NEXT: ret <2 x i64>
103 return vec_dive(vslla, vsllb);
104 }
105
test_vec_dive_ull(void)106 vector unsigned long long test_vec_dive_ull(void) {
107 // CHECK: @llvm.ppc.altivec.vdiveud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
108 // CHECK-NEXT: ret <2 x i64>
109 return vec_dive(vulla, vullb);
110 }
111
test_vec_dive_u128(void)112 vector unsigned __int128 test_vec_dive_u128(void) {
113 // CHECK: @llvm.ppc.altivec.vdiveuq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
114 // CHECK-NEXT: ret <1 x i128>
115 return vec_dive(vui128a, vui128b);
116 }
117
test_vec_dive_s128(void)118 vector signed __int128 test_vec_dive_s128(void) {
119 // CHECK: @llvm.ppc.altivec.vdivesq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
120 // CHECK-NEXT: ret <1 x i128>
121 return vec_dive(vsi128a, vsi128b);
122 }
123
test_vec_mulh_si(void)124 vector signed int test_vec_mulh_si(void) {
125 // CHECK: @llvm.ppc.altivec.vmulhsw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
126 // CHECK-NEXT: ret <4 x i32>
127 return vec_mulh(vsia, vsib);
128 }
129
test_vec_mulh_ui(void)130 vector unsigned int test_vec_mulh_ui(void) {
131 // CHECK: @llvm.ppc.altivec.vmulhuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
132 // CHECK-NEXT: ret <4 x i32>
133 return vec_mulh(vuia, vuib);
134 }
135
test_vec_mulh_sll(void)136 vector signed long long test_vec_mulh_sll(void) {
137 // CHECK: @llvm.ppc.altivec.vmulhsd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
138 // CHECK-NEXT: ret <2 x i64>
139 return vec_mulh(vslla, vsllb);
140 }
141
test_vec_mulh_ull(void)142 vector unsigned long long test_vec_mulh_ull(void) {
143 // CHECK: @llvm.ppc.altivec.vmulhud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
144 // CHECK-NEXT: ret <2 x i64>
145 return vec_mulh(vulla, vullb);
146 }
147
test_vec_mod_si(void)148 vector signed int test_vec_mod_si(void) {
149 // CHECK: srem <4 x i32>
150 // CHECK-NEXT: ret <4 x i32>
151 return vec_mod(vsia, vsib);
152 }
153
test_vec_mod_ui(void)154 vector unsigned int test_vec_mod_ui(void) {
155 // CHECK: urem <4 x i32>
156 // CHECK-NEXT: ret <4 x i32>
157 return vec_mod(vuia, vuib);
158 }
159
test_vec_mod_sll(void)160 vector signed long long test_vec_mod_sll(void) {
161 // CHECK: srem <2 x i64>
162 // CHECK-NEXT: ret <2 x i64>
163 return vec_mod(vslla, vsllb);
164 }
165
test_vec_mod_ull(void)166 vector unsigned long long test_vec_mod_ull(void) {
167 // CHECK: urem <2 x i64>
168 // CHECK-NEXT: ret <2 x i64>
169 return vec_mod(vulla, vullb);
170 }
171
test_xvcvspbf16(vector unsigned char vc)172 vector unsigned char test_xvcvspbf16(vector unsigned char vc) {
173 // CHECK-LABEL: @test_xvcvspbf16(
174 // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> [[VC:%.*]])
175 return __builtin_vsx_xvcvspbf16(vc);
176 }
177
test_xvcvbf16spn(vector unsigned char vc)178 vector unsigned char test_xvcvbf16spn(vector unsigned char vc) {
179 // CHECK-LABEL: @test_xvcvbf16spn(
180 // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> [[VC:%.*]])
181 return __builtin_vsx_xvcvbf16spn(vc);
182 }
183
test_vpdepd(void)184 vector unsigned long long test_vpdepd(void) {
185 // CHECK: @llvm.ppc.altivec.vpdepd(<2 x i64>
186 // CHECK-NEXT: ret <2 x i64>
187 return vec_pdep(vulla, vullb);
188 }
189
test_vpextd(void)190 vector unsigned long long test_vpextd(void) {
191 // CHECK: @llvm.ppc.altivec.vpextd(<2 x i64>
192 // CHECK-NEXT: ret <2 x i64>
193 return vec_pext(vulla, vullb);
194 }
195
test_vec_stril_uc(void)196 vector unsigned char test_vec_stril_uc(void) {
197 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
198 // CHECK-BE-NEXT: ret <16 x i8>
199 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
200 // CHECK-LE-NEXT: ret <16 x i8>
201 return vec_stril(vuca);
202 }
203
test_vec_stril_sc(void)204 vector signed char test_vec_stril_sc(void) {
205 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
206 // CHECK-BE-NEXT: ret <16 x i8>
207 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
208 // CHECK-LE-NEXT: ret <16 x i8>
209 return vec_stril(vsca);
210 }
211
test_vec_stril_us(void)212 vector unsigned short test_vec_stril_us(void) {
213 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
214 // CHECK-BE-NEXT: ret <8 x i16>
215 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
216 // CHECK-LE-NEXT: ret <8 x i16>
217 return vec_stril(vusa);
218 }
219
test_vec_stril_ss(void)220 vector signed short test_vec_stril_ss(void) {
221 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
222 // CHECK-BE-NEXT: ret <8 x i16>
223 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
224 // CHECK-LE-NEXT: ret <8 x i16>
225 return vec_stril(vssa);
226 }
227
test_vec_stril_p_uc(void)228 int test_vec_stril_p_uc(void) {
229 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
230 // CHECK-BE-NEXT: ret i32
231 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
232 // CHECK-LE-NEXT: ret i32
233 return vec_stril_p(vuca);
234 }
235
test_vec_stril_p_sc(void)236 int test_vec_stril_p_sc(void) {
237 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
238 // CHECK-BE-NEXT: ret i32
239 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
240 // CHECK-LE-NEXT: ret i32
241 return vec_stril_p(vsca);
242 }
243
test_vec_stril_p_us(void)244 int test_vec_stril_p_us(void) {
245 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
246 // CHECK-BE-NEXT: ret i32
247 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
248 // CHECK-LE-NEXT: ret i32
249 return vec_stril_p(vusa);
250 }
251
test_vec_stril_p_ss(void)252 int test_vec_stril_p_ss(void) {
253 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
254 // CHECK-BE-NEXT: ret i32
255 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
256 // CHECK-LE-NEXT: ret i32
257 return vec_stril_p(vssa);
258 }
259
test_vec_stril_p_uc_2(vector unsigned char * ptr,int len)260 vector unsigned char test_vec_stril_p_uc_2(vector unsigned char *ptr, int len) {
261 // CHECK-BE: icmp slt i32
262 // CHECK-BE: br i1
263 // CHECK-BE: for.body:
264 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
265 // CHECK-BE: if.then:
266 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
267 // CHECK-BE: ret <16 x i8>
268 // CHECK-LE: icmp slt i32
269 // CHECK-LE: br i1
270 // CHECK-LE: for.body:
271 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
272 // CHECK-LE: if.then:
273 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
274 // CHECK-LE: ret <16 x i8>
275 for (int i = 0; i < len; i++) {
276 if (vec_stril_p(*(ptr + i))) {
277 return vec_stril(*(ptr + i));
278 }
279 }
280 return vec_stril(*(ptr));
281 }
282
test_vec_stril_p_sc_2(vector signed char * ptr,int len)283 vector signed char test_vec_stril_p_sc_2(vector signed char *ptr, int len) {
284 // CHECK-BE: icmp slt i32
285 // CHECK-BE: br i1
286 // CHECK-BE: for.body:
287 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
288 // CHECK-BE: if.then:
289 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
290 // CHECK-BE: ret <16 x i8>
291 // CHECK-LE: icmp slt i32
292 // CHECK-LE: br i1
293 // CHECK-LE: for.body:
294 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
295 // CHECK-LE: if.then:
296 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
297 // CHECK-LE: ret <16 x i8>
298 for (int i = 0; i < len; i++) {
299 if (vec_stril_p(*(ptr + i))) {
300 return vec_stril(*(ptr + i));
301 }
302 }
303 return vec_stril(*(ptr));
304 }
305
test_vec_stril_p_us_2(vector unsigned short * ptr,int len)306 vector unsigned short test_vec_stril_p_us_2(vector unsigned short *ptr, int len) {
307 // CHECK-BE: icmp slt i32
308 // CHECK-BE: br i1
309 // CHECK-BE: for.body:
310 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
311 // CHECK-BE: if.then:
312 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
313 // CHECK-BE: ret <8 x i16>
314 // CHECK-LE: icmp slt i32
315 // CHECK-LE: br i1
316 // CHECK-LE: for.body:
317 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
318 // CHECK-LE: if.then:
319 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
320 // CHECK-LE: ret <8 x i16>
321 for (int i = 0; i < len; i++) {
322 if (vec_stril_p(*(ptr + i))) {
323 return vec_stril(*(ptr + i));
324 }
325 }
326 return vec_stril(*(ptr));
327 }
328
test_vec_stril_p_ss_2(vector signed short * ptr,int len)329 vector signed short test_vec_stril_p_ss_2(vector signed short *ptr, int len) {
330 // CHECK-BE: icmp slt i32
331 // CHECK-BE: br i1
332 // CHECK-BE: for.body:
333 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
334 // CHECK-BE: if.then:
335 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
336 // CHECK-BE: ret <8 x i16>
337 // CHECK-LE: icmp slt i32
338 // CHECK-LE: br i1
339 // CHECK-LE: for.body:
340 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
341 // CHECK-LE: if.then:
342 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
343 // CHECK-LE: ret <8 x i16>
344 for (int i = 0; i < len; i++) {
345 if (vec_stril_p(*(ptr + i))) {
346 return vec_stril(*(ptr + i));
347 }
348 }
349 return vec_stril(*(ptr));
350 }
351
test_vec_strir_uc(void)352 vector unsigned char test_vec_strir_uc(void) {
353 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
354 // CHECK-BE-NEXT: ret <16 x i8>
355 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
356 // CHECK-LE-NEXT: ret <16 x i8>
357 return vec_strir(vuca);
358 }
359
test_vec_strir_sc(void)360 vector signed char test_vec_strir_sc(void) {
361 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
362 // CHECK-BE-NEXT: ret <16 x i8>
363 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
364 // CHECK-LE-NEXT: ret <16 x i8>
365 return vec_strir(vsca);
366 }
367
test_vec_strir_us(void)368 vector unsigned short test_vec_strir_us(void) {
369 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
370 // CHECK-BE-NEXT: ret <8 x i16>
371 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
372 // CHECK-LE-NEXT: ret <8 x i16>
373 return vec_strir(vusa);
374 }
375
test_vec_strir_ss(void)376 vector signed short test_vec_strir_ss(void) {
377 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
378 // CHECK-BE-NEXT: ret <8 x i16>
379 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
380 // CHECK-LE-NEXT: ret <8 x i16>
381 return vec_strir(vssa);
382 }
383
test_vec_strir_p_uc(void)384 int test_vec_strir_p_uc(void) {
385 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
386 // CHECK-BE-NEXT: ret i32
387 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
388 // CHECK-LE-NEXT: ret i32
389 return vec_strir_p(vuca);
390 }
391
test_vec_strir_p_sc(void)392 int test_vec_strir_p_sc(void) {
393 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
394 // CHECK-BE-NEXT: ret i32
395 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
396 // CHECK-LE-NEXT: ret i32
397 return vec_strir_p(vsca);
398 }
399
test_vec_strir_p_us(void)400 int test_vec_strir_p_us(void) {
401 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
402 // CHECK-BE-NEXT: ret i32
403 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
404 // CHECK-LE-NEXT: ret i32
405 return vec_strir_p(vusa);
406 }
407
test_vec_strir_p_ss(void)408 int test_vec_strir_p_ss(void) {
409 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
410 // CHECK-BE-NEXT: ret i32
411 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
412 // CHECK-LE-NEXT: ret i32
413 return vec_strir_p(vssa);
414 }
415
test_vec_strir_p_uc_2(vector unsigned char * ptr,int len)416 vector unsigned char test_vec_strir_p_uc_2(vector unsigned char *ptr, int len) {
417 // CHECK-BE: icmp slt i32
418 // CHECK-BE: br i1
419 // CHECK-BE: for.body:
420 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
421 // CHECK-BE: if.then:
422 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
423 // CHECK-BE: ret <16 x i8>
424 // CHECK-LE: icmp slt i32
425 // CHECK-LE: br i1
426 // CHECK-LE: for.body:
427 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
428 // CHECK-LE: if.then:
429 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
430 // CHECK-LE: ret <16 x i8>
431 for (int i = 0; i < len; i++) {
432 if (vec_strir_p(*(ptr + i))) {
433 return vec_strir(*(ptr + i));
434 }
435 }
436 return vec_strir(*(ptr));
437 }
438
test_vec_strir_p_sc_2(vector signed char * ptr,int len)439 vector signed char test_vec_strir_p_sc_2(vector signed char *ptr, int len) {
440 // CHECK-BE: icmp slt i32
441 // CHECK-BE: br i1
442 // CHECK-BE: for.body:
443 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
444 // CHECK-BE: if.then:
445 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
446 // CHECK-BE: ret <16 x i8>
447 // CHECK-LE: icmp slt i32
448 // CHECK-LE: br i1
449 // CHECK-LE: for.body:
450 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
451 // CHECK-LE: if.then:
452 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
453 // CHECK-LE: ret <16 x i8>
454 for (int i = 0; i < len; i++) {
455 if (vec_strir_p(*(ptr + i))) {
456 return vec_strir(*(ptr + i));
457 }
458 }
459 return vec_strir(*(ptr));
460 }
461
test_vec_strir_p_us_2(vector unsigned short * ptr,int len)462 vector unsigned short test_vec_strir_p_us_2(vector unsigned short *ptr, int len) {
463 // CHECK-BE: icmp slt i32
464 // CHECK-BE: br i1
465 // CHECK-BE: for.body:
466 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
467 // CHECK-BE: if.then:
468 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
469 // CHECK-BE: ret <8 x i16>
470 // CHECK-LE: icmp slt i32
471 // CHECK-LE: br i1
472 // CHECK-LE: for.body:
473 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
474 // CHECK-LE: if.then:
475 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
476 // CHECK-LE: ret <8 x i16>
477 for (int i = 0; i < len; i++) {
478 if (vec_strir_p(*(ptr + i))) {
479 return vec_strir(*(ptr + i));
480 }
481 }
482 return vec_strir(*(ptr));
483 }
484
test_vec_strir_p_ss_2(vector signed short * ptr,int len)485 vector signed short test_vec_strir_p_ss_2(vector signed short *ptr, int len) {
486 // CHECK-BE: icmp slt i32
487 // CHECK-BE: br i1
488 // CHECK-BE: for.body:
489 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
490 // CHECK-BE: if.then:
491 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
492 // CHECK-BE: ret <8 x i16>
493 // CHECK-LE: icmp slt i32
494 // CHECK-LE: br i1
495 // CHECK-LE: for.body:
496 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
497 // CHECK-LE: if.then:
498 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
499 // CHECK-LE: ret <8 x i16>
500 for (int i = 0; i < len; i++) {
501 if (vec_strir_p(*(ptr + i))) {
502 return vec_strir(*(ptr + i));
503 }
504 }
505 return vec_strir(*(ptr));
506 }
507
test_vec_extractm_uc(void)508 unsigned int test_vec_extractm_uc(void) {
509 // CHECK: @llvm.ppc.altivec.vextractbm(<16 x i8> %{{.+}})
510 // CHECK-NEXT: ret i32
511 return vec_extractm(vuca);
512 }
513
test_vec_extractm_us(void)514 unsigned int test_vec_extractm_us(void) {
515 // CHECK: @llvm.ppc.altivec.vextracthm(<8 x i16> %{{.+}})
516 // CHECK-NEXT: ret i32
517 return vec_extractm(vusa);
518 }
519
test_vec_extractm_ui(void)520 unsigned int test_vec_extractm_ui(void) {
521 // CHECK: @llvm.ppc.altivec.vextractwm(<4 x i32> %{{.+}})
522 // CHECK-NEXT: ret i32
523 return vec_extractm(vuia);
524 }
525
test_vec_extractm_ull(void)526 unsigned int test_vec_extractm_ull(void) {
527 // CHECK: @llvm.ppc.altivec.vextractdm(<2 x i64> %{{.+}})
528 // CHECK-NEXT: ret i32
529 return vec_extractm(vulla);
530 }
531
test_vec_extractm_u128(void)532 unsigned int test_vec_extractm_u128(void) {
533 // CHECK: @llvm.ppc.altivec.vextractqm(<1 x i128> %{{.+}})
534 // CHECK-NEXT: ret i32
535 return vec_extractm(vui128a);
536 }
537
test_vcfuged(void)538 vector unsigned long long test_vcfuged(void) {
539 // CHECK: @llvm.ppc.altivec.vcfuged(<2 x i64>
540 // CHECK-NEXT: ret <2 x i64>
541 return vec_cfuge(vulla, vullb);
542 }
543
test_vec_expandm_uc(void)544 vector unsigned char test_vec_expandm_uc(void) {
545 // CHECK: @llvm.ppc.altivec.vexpandbm(<16 x i8> %{{.+}})
546 // CHECK-NEXT: ret <16 x i8>
547 return vec_expandm(vuca);
548 }
549
test_vec_expandm_us(void)550 vector unsigned short test_vec_expandm_us(void) {
551 // CHECK: @llvm.ppc.altivec.vexpandhm(<8 x i16> %{{.+}})
552 // CHECK-NEXT: ret <8 x i16>
553 return vec_expandm(vusa);
554 }
555
test_vec_expandm_ui(void)556 vector unsigned int test_vec_expandm_ui(void) {
557 // CHECK: @llvm.ppc.altivec.vexpandwm(<4 x i32> %{{.+}})
558 // CHECK-NEXT: ret <4 x i32>
559 return vec_expandm(vuia);
560 }
561
test_vec_expandm_ull(void)562 vector unsigned long long test_vec_expandm_ull(void) {
563 // CHECK: @llvm.ppc.altivec.vexpanddm(<2 x i64> %{{.+}})
564 // CHECK-NEXT: ret <2 x i64>
565 return vec_expandm(vulla);
566 }
567
test_vec_expandm_u128(void)568 vector unsigned __int128 test_vec_expandm_u128(void) {
569 // CHECK: @llvm.ppc.altivec.vexpandqm(<1 x i128> %{{.+}})
570 // CHECK-NEXT: ret <1 x i128>
571 return vec_expandm(vui128a);
572 }
573
test_vec_cntm_uc(void)574 unsigned long long test_vec_cntm_uc(void) {
575 // CHECK: @llvm.ppc.altivec.vcntmbb(<16 x i8> %{{.+}}, i32
576 // CHECK-NEXT: ret i64
577 return vec_cntm(vuca, 1);
578 }
579
test_vec_cntm_us(void)580 unsigned long long test_vec_cntm_us(void) {
581 // CHECK: @llvm.ppc.altivec.vcntmbh(<8 x i16> %{{.+}}, i32
582 // CHECK-NEXT: ret i64
583 return vec_cntm(vusa, 0);
584 }
585
test_vec_cntm_ui(void)586 unsigned long long test_vec_cntm_ui(void) {
587 // CHECK: @llvm.ppc.altivec.vcntmbw(<4 x i32> %{{.+}}, i32
588 // CHECK-NEXT: ret i64
589 return vec_cntm(vuia, 1);
590 }
591
test_vec_cntm_ull(void)592 unsigned long long test_vec_cntm_ull(void) {
593 // CHECK: @llvm.ppc.altivec.vcntmbd(<2 x i64> %{{.+}}, i32
594 // CHECK-NEXT: ret i64
595 return vec_cntm(vulla, 0);
596 }
597
test_vec_genbm(void)598 vector unsigned char test_vec_genbm(void) {
599 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
600 // CHECK-NEXT: ret <16 x i8>
601 return vec_genbm(ulla);
602 }
603
test_vec_genbm_imm(void)604 vector unsigned char test_vec_genbm_imm(void) {
605 // CHECK: store i64 1
606 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
607 // CHECK-NEXT: ret <16 x i8>
608 return vec_genbm(1);
609 }
610
test_vec_genbm_imm2(void)611 vector unsigned char test_vec_genbm_imm2(void) {
612 // CHECK: store i64 255
613 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
614 // CHECK-NEXT: ret <16 x i8>
615 return vec_genbm(255);
616 }
617
test_vec_genbm_imm3(void)618 vector unsigned char test_vec_genbm_imm3(void) {
619 // CHECK: store i64 65535
620 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
621 // CHECK-NEXT: ret <16 x i8>
622 return vec_genbm(65535);
623 }
624
test_vec_genbm_imm4(void)625 vector unsigned char test_vec_genbm_imm4(void) {
626 // CHECK: store i64 65536
627 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
628 // CHECK-NEXT: ret <16 x i8>
629 return vec_genbm(65536);
630 }
631
test_vec_genbm_imm5(void)632 vector unsigned char test_vec_genbm_imm5(void) {
633 // CHECK: store i64 65546
634 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
635 // CHECK-NEXT: ret <16 x i8>
636 return vec_genbm(65546);
637 }
638
test_vec_genhm(void)639 vector unsigned short test_vec_genhm(void) {
640 // CHECK: @llvm.ppc.altivec.mtvsrhm(i64 %{{.+}})
641 // CHECK-NEXT: ret <8 x i16>
642 return vec_genhm(ulla);
643 }
644
test_vec_genwm(void)645 vector unsigned int test_vec_genwm(void) {
646 // CHECK: @llvm.ppc.altivec.mtvsrwm(i64 %{{.+}})
647 // CHECK-NEXT: ret <4 x i32>
648 return vec_genwm(ulla);
649 }
650
test_vec_gendm(void)651 vector unsigned long long test_vec_gendm(void) {
652 // CHECK: @llvm.ppc.altivec.mtvsrdm(i64 %{{.+}})
653 // CHECK-NEXT: ret <2 x i64>
654 return vec_gendm(ulla);
655 }
656
test_vec_genqm(void)657 vector unsigned __int128 test_vec_genqm(void) {
658 // CHECK: @llvm.ppc.altivec.mtvsrqm(i64 %{{.+}})
659 // CHECK-NEXT: ret <1 x i128>
660 return vec_genqm(ulla);
661 }
662
test_vgnb_1(void)663 unsigned long long test_vgnb_1(void) {
664 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 2)
665 // CHECK-NEXT: ret i64
666 return vec_gnb(vui128a, 2);
667 }
668
test_vgnb_2(void)669 unsigned long long test_vgnb_2(void) {
670 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 7)
671 // CHECK-NEXT: ret i64
672 return vec_gnb(vui128a, 7);
673 }
674
test_vgnb_3(void)675 unsigned long long test_vgnb_3(void) {
676 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 5)
677 // CHECK-NEXT: ret i64
678 return vec_gnb(vui128a, 5);
679 }
680
test_xxeval_uc(void)681 vector unsigned char test_xxeval_uc(void) {
682 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 0)
683 // CHECK: ret <16 x i8>
684 return vec_ternarylogic(vuca, vucb, vucc, 0);
685 }
686
test_xxeval_us(void)687 vector unsigned short test_xxeval_us(void) {
688 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 255)
689 // CHECK: ret <8 x i16>
690 return vec_ternarylogic(vusa, vusb, vusc, 255);
691 }
692
test_xxeval_ui(void)693 vector unsigned int test_xxeval_ui(void) {
694 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 150)
695 // CHECK: ret <4 x i32>
696 return vec_ternarylogic(vuia, vuib, vuic, 150);
697 }
698
test_xxeval_ull(void)699 vector unsigned long long test_xxeval_ull(void) {
700 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 1)
701 // CHECK: ret <2 x i64>
702 return vec_ternarylogic(vulla, vullb, vullc, 1);
703 }
704
test_xxeval_ui128(void)705 vector unsigned __int128 test_xxeval_ui128(void) {
706 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 246)
707 // CHECK: ret <1 x i128>
708 return vec_ternarylogic(vui128a, vui128b, vui128c, 246);
709 }
710
test_xxgenpcvbm(void)711 vector unsigned char test_xxgenpcvbm(void) {
712 // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
713 // CHECK-NEXT: ret <16 x i8>
714 return vec_genpcvm(vuca, 0);
715 }
716
test_xxgenpcvhm(void)717 vector unsigned short test_xxgenpcvhm(void) {
718 // CHECK: @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %{{.+}}, i32
719 // CHECK-NEXT: ret <8 x i16>
720 return vec_genpcvm(vusa, 0);
721 }
722
test_xxgenpcvwm(void)723 vector unsigned int test_xxgenpcvwm(void) {
724 // CHECK: @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %{{.+}}, i32
725 // CHECK-NEXT: ret <4 x i32>
726 return vec_genpcvm(vuia, 0);
727 }
728
test_xxgenpcvdm(void)729 vector unsigned long long test_xxgenpcvdm(void) {
730 // CHECK: @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %{{.+}}, i32
731 // CHECK-NEXT: ret <2 x i64>
732 return vec_genpcvm(vulla, 0);
733 }
734
test_vec_vclrl_sc(void)735 vector signed char test_vec_vclrl_sc(void) {
736 // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
737 // CHECK-BE-NEXT: ret <16 x i8>
738 // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
739 // CHECK-LE-NEXT: ret <16 x i8>
740 return vec_clrl(vsca, uia);
741 }
742
test_vec_clrl_uc(void)743 vector unsigned char test_vec_clrl_uc(void) {
744 // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
745 // CHECK-BE-NEXT: ret <16 x i8>
746 // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
747 // CHECK-LE-NEXT: ret <16 x i8>
748 return vec_clrl(vuca, uia);
749 }
750
test_vec_vclrr_sc(void)751 vector signed char test_vec_vclrr_sc(void) {
752 // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
753 // CHECK-BE-NEXT: ret <16 x i8>
754 // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
755 // CHECK-LE-NEXT: ret <16 x i8>
756 return vec_clrr(vsca, uia);
757 }
758
test_vec_clrr_uc(void)759 vector unsigned char test_vec_clrr_uc(void) {
760 // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
761 // CHECK-BE-NEXT: ret <16 x i8>
762 // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
763 // CHECK-LE-NEXT: ret <16 x i8>
764 return vec_clrr(vuca, uia);
765 }
766
test_vclzdm(void)767 vector unsigned long long test_vclzdm(void) {
768 // CHECK: @llvm.ppc.altivec.vclzdm(<2 x i64>
769 // CHECK-NEXT: ret <2 x i64>
770 return vec_cntlzm(vulla, vullb);
771 }
772
test_vctzdm(void)773 vector unsigned long long test_vctzdm(void) {
774 // CHECK: @llvm.ppc.altivec.vctzdm(<2 x i64>
775 // CHECK-NEXT: ret <2 x i64>
776 return vec_cnttzm(vulla, vullb);
777 }
778
test_vec_sldb_sc(void)779 vector signed char test_vec_sldb_sc(void) {
780 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
781 // CHECK-NEXT: ret <16 x i8>
782 return vec_sldb(vsca, vscb, 0);
783 }
784
test_vec_sldb_uc(void)785 vector unsigned char test_vec_sldb_uc(void) {
786 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
787 // CHECK-NEXT: ret <16 x i8>
788 return vec_sldb(vuca, vucb, 1);
789 }
790
test_vec_sldb_ss(void)791 vector signed short test_vec_sldb_ss(void) {
792 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
793 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
794 // CHECK-NEXT: ret <8 x i16>
795 return vec_sldb(vssa, vssb, 2);
796 }
797
test_vec_sldb_us(void)798 vector unsigned short test_vec_sldb_us(void) {
799 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
800 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
801 // CHECK-NEXT: ret <8 x i16>
802 return vec_sldb(vusa, vusb, 3);
803 }
804
test_vec_sldb_si(void)805 vector signed int test_vec_sldb_si(void) {
806 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
807 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
808 // CHECK-NEXT: ret <4 x i32>
809 return vec_sldb(vsia, vsib, 4);
810 }
811
test_vec_sldb_ui(void)812 vector unsigned int test_vec_sldb_ui(void) {
813 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
814 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
815 // CHECK-NEXT: ret <4 x i32>
816 return vec_sldb(vuia, vuib, 5);
817 }
818
test_vec_sldb_sll(void)819 vector signed long long test_vec_sldb_sll(void) {
820 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
821 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
822 // CHECK-NEXT: ret <2 x i64>
823 return vec_sldb(vslla, vsllb, 6);
824 }
825
test_vec_sldb_ull(void)826 vector unsigned long long test_vec_sldb_ull(void) {
827 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
828 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
829 // CHECK-NEXT: ret <2 x i64>
830 return vec_sldb(vulla, vullb, 7);
831 }
832
// vec_srdb (shift right double by bit immediate) tests. The vsrdbi intrinsic
// takes a 3-bit immediate, so the shift counts 8, 9 and 10 used below are
// intentionally out of range and are expected to be masked (mod 8) down to
// 0, 1 and 2 in the emitted call. Non-char element types are bitcast back
// from the <16 x i8> intrinsic result.
vector signed char test_vec_srdb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vsca, vscb, 8);
}

vector unsigned char test_vec_srdb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vuca, vucb, 9);
}

vector signed short test_vec_srdb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vssa, vssb, 10);
}

vector unsigned short test_vec_srdb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vusa, vusb, 3);
}

vector signed int test_vec_srdb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vsia, vsib, 4);
}

vector unsigned int test_vec_srdb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vuia, vuib, 5);
}

vector signed long long test_vec_srdb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_srdb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vulla, vullb, 7);
}
886
// vec_permx tests: each should lower to the xxpermx intrinsic, which operates
// on <16 x i8> operands regardless of element type; results for wider element
// types are bitcast back to the source vector type.
vector signed char test_vec_permx_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vsca, vscb, vucc, 0);
}

vector unsigned char test_vec_permx_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vuca, vucb, vucc, 1);
}

vector signed short test_vec_permx_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vssa, vssb, vucc, 2);
}

vector unsigned short test_vec_permx_us(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vusa, vusb, vucc, 3);
}

vector signed int test_vec_permx_si(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vsia, vsib, vucc, 4);
}

vector unsigned int test_vec_permx_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vuia, vuib, vucc, 5);
}

vector signed long long test_vec_permx_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vslla, vsllb, vucc, 6);
}

vector unsigned long long test_vec_permx_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vulla, vullb, vucc, 7);
}

vector float test_vec_permx_f(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_permx(vfa, vfb, vucc, 0);
}

vector double test_vec_permx_d(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_permx(vda, vdb, vucc, 1);
}
954
// vec_blendv tests: each element width selects the matching xxblendv
// intrinsic (xxblendvb/h/w/d). Floating-point vectors reuse the integer
// intrinsic of the same width and bitcast the result back.
vector signed char test_vec_blend_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vsca, vscb, vucc);
}

vector unsigned char test_vec_blend_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vuca, vucb, vucc);
}

vector signed short test_vec_blend_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vssa, vssb, vusc);
}

vector unsigned short test_vec_blend_us(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vusa, vusb, vusc);
}

vector signed int test_vec_blend_si(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vsia, vsib, vuic);
}

vector unsigned int test_vec_blend_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vuia, vuib, vuic);
}

vector signed long long test_vec_blend_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vslla, vsllb, vullc);
}

vector unsigned long long test_vec_blend_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vulla, vullb, vullc);
}

vector float test_vec_blend_f(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_blendv(vfa, vfb, vuic);
}

vector double test_vec_blend_d(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_blendv(vda, vdb, vullc);
}
1016
// vec_replace_elt tests: the element index is converted to a byte offset for
// the vinsw/vinsd intrinsics, and the offset differs between big and little
// endian (LE counts from the other end of the register). Floating-point
// scalars are bitcast to integers before insertion and the vector result is
// bitcast back.
vector signed int test_vec_replace_elt_si(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 12
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_replace_elt(vsia, sia, 0);
}

vector unsigned int test_vec_replace_elt_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_replace_elt(vuia, uia, 1);
}

vector float test_vec_replace_elt_f(void) {
  // CHECK-BE: bitcast float %{{.+}} to i32
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-BE-NEXT: ret <4 x float>
  // CHECK-LE: bitcast float %{{.+}} to i32
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-LE-NEXT: ret <4 x float>
  return vec_replace_elt(vfa, fa, 2);
}

vector signed long long test_vec_replace_elt_sll(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_replace_elt(vslla, llb, 0);
}

vector unsigned long long test_vec_replace_elt_ull(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_replace_elt(vulla, ulla, 0);
}

vector double test_vec_replace_elt_d(void) {
  // CHECK-BE: bitcast double %{{.+}} to i64
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-BE-NEXT: ret <2 x double>
  // CHECK-LE: bitcast double %{{.+}} to i64
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-LE-NEXT: ret <2 x double>
  return vec_replace_elt(vda, da, 1);
}
1072
// vec_replace_unaligned tests: like vec_replace_elt but the caller supplies a
// raw byte offset, so the result is returned as a <16 x i8> char vector. The
// byte offset is mirrored on little endian (LE offset = 16 - width - BE
// offset), and float/double scalars are bitcast to integers for the insert.
vector unsigned char test_vec_replace_unaligned_si(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 6
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 6
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vsia, sia, 6);
}

vector unsigned char test_vec_replace_unaligned_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vuia, uia, 8);
}

vector unsigned char test_vec_replace_unaligned_f(void) {
  // CHECK-BE: bitcast float %{{.+}} to i32
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 12
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: bitcast float %{{.+}} to i32
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vfa, fa, 12);
}

vector unsigned char test_vec_replace_unaligned_sll(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 6
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 2
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vslla, llb, 6);
}

vector unsigned char test_vec_replace_unaligned_ull(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 7
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 1
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vulla, ulla, 7);
}

vector unsigned char test_vec_replace_unaligned_d(void) {
  // CHECK-BE: bitcast double %{{.+}} to i64
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: bitcast double %{{.+}} to i64
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vda, da, 8);
}
1136
// vec_insertl tests: "insert from left" maps to the left-index (lx) intrinsic
// variants on big endian and to the right-index (rx) variants on little
// endian. Scalar-insert forms take (scalar, vector, index); vector-insert
// forms (the *v intrinsics) take (vector, vector, index).
vector unsigned char test_vec_insertl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(uca, vuca, uia);
}

vector unsigned short test_vec_insertl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(usa, vusa, uia);
}

vector unsigned int test_vec_insertl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(uib, vuia, uia);
}

vector unsigned long long test_vec_insertl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_insertl(ulla, vulla, uia);
}

vector unsigned char test_vec_insertl_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(vuca, vucb, uia);
}

vector unsigned short test_vec_insertl_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(vusa, vusb, uia);
}

vector unsigned int test_vec_insertl_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(vuia, vuib, uia);
}
1192
// vec_inserth tests: the mirror image of vec_insertl — "insert from high"
// selects the right-index (rx) intrinsics on big endian and the left-index
// (lx) intrinsics on little endian.
vector unsigned char test_vec_inserth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(uca, vuca, uia);
}

vector unsigned short test_vec_inserth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(usa, vusa, uia);
}

vector unsigned int test_vec_inserth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(uib, vuia, uia);
}

vector unsigned long long test_vec_inserth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_inserth(ulla, vulla, uia);
}

vector unsigned char test_vec_inserth_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(vuca, vucb, uia);
}

vector unsigned short test_vec_inserth_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(vusa, vusb, uia);
}

vector unsigned int test_vec_inserth_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(vuia, vuib, uia);
}
1248
// vec_extractl tests: extract-and-zero into a doubleword. LE maps directly to
// the right-index (rx) extract intrinsic; BE uses the left-index (lx) variant
// and then needs a vperm to move the extracted doubleword into the expected
// element position, which is why the BE pattern has the extra bitcast/vperm
// sequence.
vector unsigned long long test_vec_extractl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuca, vucb, uia);
}

vector unsigned long long test_vec_extractl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vusa, vusb, uia);
}

vector unsigned long long test_vec_extractl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuia, vuib, uia);
}

vector unsigned long long test_vec_extractl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vulla, vullb, uia);
}
1296
// vec_extracth tests: the mirror of vec_extractl — BE selects the right-index
// (rx) extract intrinsic (plus the vperm repositioning sequence), LE selects
// the left-index (lx) intrinsic directly.
vector unsigned long long test_vec_extracth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vuca, vucb, uia);
}

vector unsigned long long test_vec_extracth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vusa, vusb, uia);
}

vector unsigned long long test_vec_extracth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vuia, vuib, uia);
}

vector unsigned long long test_vec_extracth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vulla, vullb, uia);
}
1344
// vec_splati tests: splatting a literal should constant-fold to an immediate
// vector return.
vector signed int test_vec_vec_splati_si(void) {
  // CHECK: ret <4 x i32> <i32 -17, i32 -17, i32 -17, i32 -17>
  return vec_splati(-17);
}

vector unsigned int test_vec_vec_splati_ui(void) {
  // CHECK: ret <4 x i32> <i32 16, i32 16, i32 16, i32 16>
  return vec_splati(16U);
}

vector float test_vec_vec_splati_f(void) {
  // CHECK: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  return vec_splati(1.0f);
}

// vec_splatid takes a float, extends it to double and splats both lanes; the
// pattern (fpext + insertelement + splat shufflevector) is the same for both
// endiannesses.
vector double test_vec_vec_splatid(void) {
  // CHECK-BE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-BE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1:%.+]], i32 0
  // CHECK-BE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> poison, <2 x i32> zeroinitialize
  // CHECK-BE-NEXT: ret <2 x double> [[T3:%.+]]
  // CHECK-LE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-LE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1:%.+]], i32 0
  // CHECK-LE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> poison, <2 x i32> zeroinitialize
  // CHECK-LE-NEXT: ret <2 x double> [[T3:%.+]]
  return vec_splatid(1.0);
}

// vec_splati_ins inserts the splatted scalar into elements IMM and IMM+2;
// little endian mirrors the indices (1-IMM and 3-IMM), hence the sub
// instructions in the LE pattern.
vector signed int test_vec_vec_splati_ins_si(void) {
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vsia, 0, -17);
}

vector unsigned int test_vec_vec_splati_ins_ui(void) {
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vuia, 1, 16U);
}

vector float test_vec_vec_splati_ins_f(void) {
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x float>
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x float>
  return vec_splati_ins(vfa, 0, 1.0f);
}
1410
// vec_xst_trunc tests: storing a truncated __int128 vector element should
// lower to a plain scalar store of the destination's element width with the
// natural alignment for that width.
void test_vec_xst_trunc_sc(vector signed __int128 __a, signed long long __b,
                           signed char *__c) {
  // CHECK: store i8 %{{.+}}, i8* %{{.+}}, align 1
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_uc(vector unsigned __int128 __a, signed long long __b,
                           unsigned char *__c) {
  // CHECK: store i8 %{{.+}}, i8* %{{.+}}, align 1
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ss(vector signed __int128 __a, signed long long __b,
                           signed short *__c) {
  // CHECK: store i16 %{{.+}}, i16* %{{.+}}, align 2
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_us(vector unsigned __int128 __a, signed long long __b,
                           unsigned short *__c) {
  // CHECK: store i16 %{{.+}}, i16* %{{.+}}, align 2
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_si(vector signed __int128 __a, signed long long __b,
                           signed int *__c) {
  // CHECK: store i32 %{{.+}}, i32* %{{.+}}, align 4
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ui(vector unsigned __int128 __a, signed long long __b,
                           unsigned int *__c) {
  // CHECK: store i32 %{{.+}}, i32* %{{.+}}, align 4
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_sll(vector signed __int128 __a, signed long long __b,
                            signed long long *__c) {
  // CHECK: store i64 %{{.+}}, i64* %{{.+}}, align 8
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ull(vector unsigned __int128 __a, signed long long __b,
                            unsigned long long *__c) {
  // CHECK: store i64 %{{.+}}, i64* %{{.+}}, align 8
  vec_xst_trunc(__a, __b, __c);
}
1458
// Quadword shift tests: vec_sl/vec_sr/vec_sra on __int128 vectors should
// lower to plain IR shl/lshr/ashr on <1 x i128>. Note vec_sra on an unsigned
// operand still emits an arithmetic shift (ashr), matching the intrinsic's
// defined semantics.
vector unsigned __int128 test_vec_slq_unsigned (void) {
  // CHECK-LABEL: test_vec_slq_unsigned
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128> %{{.+}}
  return vec_sl(vui128a, vui128b);
}

vector signed __int128 test_vec_slq_signed (void) {
  // CHECK-LABEL: test_vec_slq_signed
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sl(vi128a, vui128a);
}

vector unsigned __int128 test_vec_srq_unsigned (void) {
  // CHECK-LABEL: test_vec_srq_unsigned
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vui128a, vui128b);
}

vector signed __int128 test_vec_srq_signed (void) {
  // CHECK-LABEL: test_vec_srq_signed
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vi128a, vui128a);
}

vector unsigned __int128 test_vec_sraq_unsigned (void) {
  // CHECK-LABEL: test_vec_sraq_unsigned
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vui128a, vui128b);
}

vector signed __int128 test_vec_sraq_signed (void) {
  // CHECK-LABEL: test_vec_sraq_signed
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vi128a, vui128a);
}
1500
1501
// xvtlsbb tests: the second intrinsic argument selects the comparison —
// 1 tests whether every byte's least significant bit is one, 0 whether every
// byte's least significant bit is zero.
int test_vec_test_lsbb_all_ones(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_ones(vuca);
}

int test_vec_test_lsbb_all_zeros(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_zeros(vuca);
}
1513
// 128-bit multiply tests: the even/odd notion is endian-relative, so vec_mule
// selects the even-doubleword intrinsic on BE and the odd-doubleword one on
// LE, and vec_mulo is the reverse.
vector unsigned __int128 test_vec_mule_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vulla, vullb);
}

vector signed __int128 test_vec_mule_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vslla, vsllb);
}

vector unsigned __int128 test_vec_mulo_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vulla, vullb);
}

vector signed __int128 test_vec_mulo_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vslla, vsllb);
}

// vec_msumc (multiply-sum with carry-out) is endian-neutral.
vector unsigned __int128 test_vec_msumc_u128(void) {
  // CHECK: @llvm.ppc.altivec.vmsumcud(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_msumc(vulla, vullb, vui128a);
}
1550 }
1551
// vec_xl_sext from a signed char pointer: scalar i8 load sign-extended into <1 x i128>.
vector signed __int128 test_vec_xl_sext_i8(void) {
  // CHECK: load i8
  // CHECK: sext i8
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, cap);
}
1558
// vec_xl_sext from a signed short pointer: scalar i16 load sign-extended into <1 x i128>.
vector signed __int128 test_vec_xl_sext_i16(void) {
  // CHECK: load i16
  // CHECK: sext i16
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, sap);
}
1565
// vec_xl_sext from a signed int pointer: scalar i32 load sign-extended into <1 x i128>.
vector signed __int128 test_vec_xl_sext_i32(void) {
  // CHECK: load i32
  // CHECK: sext i32
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, iap);
}
1572
// vec_xl_sext from a signed long long pointer: scalar i64 load sign-extended into <1 x i128>.
vector signed __int128 test_vec_xl_sext_i64(void) {
  // CHECK: load i64
  // CHECK: sext i64
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, llap);
}
1579
// vec_xl_zext from an unsigned char pointer: scalar i8 load zero-extended into <1 x i128>.
vector unsigned __int128 test_vec_xl_zext_i8(void) {
  // CHECK: load i8
  // CHECK: zext i8
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ucap);
}
1586
// vec_xl_zext from an unsigned short pointer: scalar i16 load zero-extended into <1 x i128>.
vector unsigned __int128 test_vec_xl_zext_i16(void) {
  // CHECK: load i16
  // CHECK: zext i16
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, usap);
}
1593
// vec_xl_zext from an unsigned int pointer: scalar i32 load zero-extended into <1 x i128>.
vector unsigned __int128 test_vec_xl_zext_i32(void) {
  // CHECK: load i32
  // CHECK: zext i32
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, uiap);
}
1600
// vec_xl_zext from an unsigned long long pointer: scalar i64 load zero-extended into <1 x i128>.
vector unsigned __int128 test_vec_xl_zext_i64(void) {
  // CHECK: load i64
  // CHECK: zext i64
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ullap);
}
1607
// vec_signextq (sign-extend doubleword to quadword) lowers to vextsd2q.
vector signed __int128 test_vec_signextq_s128(void) {
  // CHECK: @llvm.ppc.altivec.vextsd2q(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_signextq(vslla);
}
1613
// vec_mod on unsigned __int128 lowers to a plain urem on <1 x i128>.
vector unsigned __int128 test_vec_mod_u128(void) {
  // CHECK: urem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vui128a, vui128b);
}
1619
// vec_mod on signed __int128 lowers to a plain srem on <1 x i128>.
vector signed __int128 test_vec_mod_s128(void) {
  // CHECK: srem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vsi128a, vsi128b);
}
1625
// vec_cmpeq on signed __int128 lowers to the vcmpequq intrinsic.
vector bool __int128 test_vec_cmpeq_s128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vsi128a, vsi128b);
}
1632
// vec_cmpeq on unsigned __int128 also lowers to vcmpequq (equality is sign-agnostic).
vector bool __int128 test_vec_cmpeq_u128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vui128a, vui128b);
}
1639
// vec_cmpne on signed __int128: vcmpequq followed by an all-ones xor (bitwise not).
vector bool __int128 test_vec_cmpne_s128(void) {
  // CHECK-LABEL: @test_vec_cmpne_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128> %neg.i
  return vec_cmpne(vsi128a, vsi128b);
}
1647
// vec_cmpne on unsigned __int128: vcmpequq followed by an all-ones xor (bitwise not).
vector bool __int128 test_vec_cmpne_u128(void) {
  // CHECK-LABEL: @test_vec_cmpne_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpne(vui128a, vui128b);
}
1655
// vec_cmpgt on signed __int128 lowers to vcmpgtsq.
vector bool __int128 test_vec_cmpgt_s128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vsi128a, vsi128b);
}
1662
// vec_cmpgt on unsigned __int128 lowers to vcmpgtuq.
vector bool __int128 test_vec_cmpgt_u128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vui128a, vui128b);
}
1669
// vec_cmplt on signed __int128 reuses vcmpgtsq (less-than expressed via greater-than).
vector bool __int128 test_vec_cmplt_s128(void) {
  // CHECK-LABEL: @test_vec_cmplt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vsi128a, vsi128b);
}
1676
// vec_cmplt on unsigned __int128 reuses vcmpgtuq (less-than expressed via greater-than).
vector bool __int128 test_vec_cmplt_u128(void) {
  // CHECK-LABEL: @test_vec_cmplt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vui128a, vui128b);
}
1683
// vec_cmpge on signed __int128: vcmpgtsq then an all-ones xor (ge == not lt).
vector bool __int128 test_vec_cmpge_s128(void) {
  // CHECK-LABEL: @test_vec_cmpge_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vsi128a, vsi128b);
}
1691
// vec_cmpge on unsigned __int128: vcmpgtuq then an all-ones xor (ge == not lt).
vector bool __int128 test_vec_cmpge_u128(void) {
  // CHECK-LABEL: @test_vec_cmpge_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vui128a, vui128b);
}
1699
// vec_cmple on signed __int128: vcmpgtsq then an all-ones xor (le == not gt);
// the doubly-inlined %neg.i.i name reflects cmple being implemented via cmpge.
vector bool __int128 test_vec_cmple_s128(void) {
  // CHECK-LABEL: @test_vec_cmple_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: %neg.i.i = xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vsi128a, vsi128b);
}
1707
// vec_cmple on unsigned __int128: vcmpgtuq then an all-ones xor (le == not gt).
vector bool __int128 test_vec_cmple_u128(void) {
  // CHECK-LABEL: @test_vec_cmple_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: %neg.i.i = xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vui128a, vui128b);
}
1715
// vec_any_eq on unsigned __int128: vcmpequq.p predicate form, CR6 code 1.
int test_vec_any_eq_u128(void) {
  // CHECK-LABEL: @test_vec_any_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vui128a, vui128b);
}
1722
// vec_any_eq on signed __int128: same vcmpequq.p lowering as the unsigned case.
int test_vec_any_eq_s128(void) {
  // CHECK-LABEL: @test_vec_any_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vsi128a, vsi128b);
}
1729
// vec_any_ne on signed __int128: vcmpequq.p with CR6 code 3.
int test_vec_any_ne_s128(void) {
  // CHECK-LABEL: @test_vec_any_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vsi128a, vsi128b);
}
1736
// vec_any_ne on unsigned __int128: vcmpequq.p with CR6 code 3.
int test_vec_any_ne_u128(void) {
  // CHECK-LABEL: @test_vec_any_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vui128a, vui128b);
}
1743
// vec_any_lt on signed __int128: vcmpgtsq.p with CR6 code 1.
int test_vec_any_lt_s128(void) {
  // CHECK-LABEL: @test_vec_any_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vsi128a, vsi128b);
}
1750
// vec_any_lt on unsigned __int128: vcmpgtuq.p with CR6 code 1.
int test_vec_any_lt_u128(void) {
  // CHECK-LABEL: @test_vec_any_lt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vui128a, vui128b);
}
1757
// vec_any_gt on signed __int128: vcmpgtsq.p with CR6 code 1.
int test_vec_any_gt_s128(void) {
  // CHECK-LABEL: @test_vec_any_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vsi128a, vsi128b);
}
1764
// vec_any_gt on unsigned __int128: vcmpgtuq.p with CR6 code 1.
int test_vec_any_gt_u128(void) {
  // CHECK-LABEL: @test_vec_any_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vui128a, vui128b);
}
1771
// vec_any_le on signed __int128: vcmpgtsq.p with CR6 code 3.
int test_vec_any_le_s128(void) {
  // CHECK-LABEL: @test_vec_any_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vsi128a, vsi128b);
}
1778
// vec_any_le on unsigned __int128: vcmpgtuq.p with CR6 code 3.
int test_vec_any_le_u128(void) {
  // CHECK-LABEL: @test_vec_any_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vui128a, vui128b);
}
1785
// vec_any_ge on signed __int128: vcmpgtsq.p with CR6 code 3.
int test_vec_any_ge_s128(void) {
  // CHECK-LABEL: @test_vec_any_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vsi128a, vsi128b);
}
1792
// vec_any_ge on unsigned __int128: vcmpgtuq.p with CR6 code 3.
int test_vec_any_ge_u128(void) {
  // CHECK-LABEL: @test_vec_any_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vui128a, vui128b);
}
1799
// vec_all_eq on signed __int128: vcmpequq.p with CR6 code 2.
int test_vec_all_eq_s128(void) {
  // CHECK-LABEL: @test_vec_all_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vsi128a, vsi128b);
}
1806
// vec_all_eq on unsigned __int128: vcmpequq.p with CR6 code 2.
int test_vec_all_eq_u128(void) {
  // CHECK-LABEL: @test_vec_all_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vui128a, vui128b);
}
1813
// vec_all_ne on signed __int128: vcmpequq.p with CR6 code 0.
int test_vec_all_ne_s128(void) {
  // CHECK-LABEL: @test_vec_all_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vsi128a, vsi128b);
}
1820
// vec_all_ne on unsigned __int128: vcmpequq.p with CR6 code 0.
int test_vec_all_ne_u128(void) {
  // CHECK-LABEL: @test_vec_all_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vui128a, vui128b);
}
1827
// vec_all_lt on signed __int128: vcmpgtsq.p with CR6 code 2.
int test_vec_all_lt_s128(void) {
  // CHECK-LABEL: @test_vec_all_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_lt(vsi128a, vsi128b);
}
1834
test_vec_all_lt_u128(void)1835 int test_vec_all_lt_u128(void) {
1836 // CHECK-LABEL: @test_vec_all_lt_u128(
1837 // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
1838 // CHECK: ret i32
1839 return vec_all_lt(vui128a, vui128b);
1840 }
1841
// vec_all_gt on signed __int128: vcmpgtsq.p with CR6 code 2.
int test_vec_all_gt_s128(void) {
  // CHECK-LABEL: @test_vec_all_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vsi128a, vsi128b);
}
1848
// vec_all_gt on unsigned __int128: vcmpgtuq.p with CR6 code 2.
int test_vec_all_gt_u128(void) {
  // CHECK-LABEL: @test_vec_all_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vui128a, vui128b);
}
1855
// vec_all_le on signed __int128: vcmpgtsq.p with CR6 code 0.
int test_vec_all_le_s128(void) {
  // CHECK-LABEL: @test_vec_all_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vsi128a, vsi128b);
}
1862
// vec_all_le on unsigned __int128: vcmpgtuq.p with CR6 code 0.
int test_vec_all_le_u128(void) {
  // CHECK-LABEL: @test_vec_all_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vui128a, vui128b);
}
1869
// vec_all_ge on signed __int128: vcmpgtsq.p with CR6 code 0.
int test_vec_all_ge_s128(void) {
  // CHECK-LABEL: @test_vec_all_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vsi128a, vsi128b);
}
1876
// vec_all_ge on unsigned __int128: vcmpgtuq.p with CR6 code 0.
int test_vec_all_ge_u128(void) {
  // CHECK-LABEL: @test_vec_all_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vui128a, vui128b);
}
1883
// vec_rl on signed __int128: rotate expressed as shift-left | shift-right
// of (width - amount); the sub/lshr/or sequence is matched in order.
vector signed __int128 test_vec_rl_s128(void) {
  // CHECK-LABEL: @test_vec_rl_s128(
  // CHECK: sub <1 x i128>
  // CHECK-NEXT: lshr <1 x i128>
  // CHECK-NEXT: or <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rl(vsi128a, vsi128b);
}
1892
// vec_rl on unsigned __int128: same sub/lshr/or rotate lowering as the signed
// case. NOTE(review): lshr/or here use plain CHECK rather than CHECK-NEXT as
// in test_vec_rl_s128 — possibly intentional slack; confirm before tightening.
vector unsigned __int128 test_vec_rl_u128(void) {
  // CHECK-LABEL: @test_vec_rl_u128(
  // CHECK: sub <1 x i128>
  // CHECK: lshr <1 x i128>
  // CHECK: or <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rl(vui128a, vui128b);
}
1901
// vec_rlnm on signed __int128: the mask/shift control operands are combined
// into one vector via an endian-specific byte shuffle before calling vrlqnm.
vector signed __int128 test_vec_rlnm_s128(void) {
  // CHECK-LABEL: @test_vec_rlnm_s128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vsi128a, vsi128b, vsi128c);
}
1910
// vec_rlnm on unsigned __int128: identical endian-specific shuffle + vrlqnm
// lowering as the signed variant.
vector unsigned __int128 test_vec_rlnm_u128(void) {
  // CHECK-LABEL: @test_vec_rlnm_u128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vui128a, vui128b, vui128c);
}
1919
// vec_rlmi (rotate-left then mask-insert) on signed __int128 lowers to vrlqmi.
vector signed __int128 test_vec_rlmi_s128(void) {
  // CHECK-LABEL: @test_vec_rlmi_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vsi128a, vsi128b, vsi128c);
}
1926
// vec_rlmi (rotate-left then mask-insert) on unsigned __int128 lowers to vrlqmi.
vector unsigned __int128 test_vec_rlmi_u128(void) {
  // CHECK-LABEL: @test_vec_rlmi_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vui128a, vui128b, vui128c);
}
1933