// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -target-feature +vsx \
// RUN:   -target-cpu pwr10 -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefixes=CHECK-BE,CHECK
// RUN: %clang_cc1 -target-feature +vsx \
// RUN:   -target-cpu pwr10 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefixes=CHECK-LE,CHECK
8
9 #include <altivec.h>
10
11 vector signed __int128 vi128a;
12 vector signed char vsca, vscb;
13 vector unsigned char vuca, vucb, vucc;
14 vector signed short vssa, vssb;
15 vector unsigned short vusa, vusb, vusc;
16 vector signed int vsia, vsib;
17 vector unsigned int vuia, vuib, vuic;
18 vector signed long long vslla, vsllb;
19 vector unsigned long long vulla, vullb, vullc;
20 vector signed __int128 vsi128a, vsi128b, vsi128c;
21 vector unsigned __int128 vui128a, vui128b, vui128c;
22 vector bool __int128 vbi128a, vbi128b;
23 vector float vfa, vfb;
24 vector double vda, vdb;
25 float fa;
26 double da;
27 signed int sia;
28 signed int *iap;
29 unsigned int uia, uib, *uiap;
30 signed char *cap;
31 unsigned char uca;
32 const unsigned char *ucap;
33 const signed short *sap;
34 unsigned short usa;
35 const unsigned short *usap;
36 const signed long long *llap;
37 signed long long llb;
38 unsigned long long ulla;
39 const unsigned long long *ullap;
40
test_vec_mul_sll(void)41 vector signed long long test_vec_mul_sll(void) {
42 // CHECK: mul <2 x i64>
43 // CHECK-NEXT: ret <2 x i64>
44 return vec_mul(vslla, vsllb);
45 }
46
test_vec_mul_ull(void)47 vector unsigned long long test_vec_mul_ull(void) {
48 // CHECK: mul <2 x i64>
49 // CHECK-NEXT: ret <2 x i64>
50 return vec_mul(vulla, vullb);
51 }
52
test_vec_div_si(void)53 vector signed int test_vec_div_si(void) {
54 // CHECK: sdiv <4 x i32>
55 // CHECK-NEXT: ret <4 x i32>
56 return vec_div(vsia, vsib);
57 }
58
test_vec_div_ui(void)59 vector unsigned int test_vec_div_ui(void) {
60 // CHECK: udiv <4 x i32>
61 // CHECK-NEXT: ret <4 x i32>
62 return vec_div(vuia, vuib);
63 }
64
test_vec_div_sll(void)65 vector signed long long test_vec_div_sll(void) {
66 // CHECK: sdiv <2 x i64>
67 // CHECK-NEXT: ret <2 x i64>
68 return vec_div(vslla, vsllb);
69 }
70
test_vec_div_ull(void)71 vector unsigned long long test_vec_div_ull(void) {
72 // CHECK: udiv <2 x i64>
73 // CHECK-NEXT: ret <2 x i64>
74 return vec_div(vulla, vullb);
75 }
76
test_vec_div_u128(void)77 vector unsigned __int128 test_vec_div_u128(void) {
78 // CHECK: udiv <1 x i128>
79 // CHECK-NEXT: ret <1 x i128>
80 return vec_div(vui128a, vui128b);
81 }
82
test_vec_div_s128(void)83 vector signed __int128 test_vec_div_s128(void) {
84 // CHECK: sdiv <1 x i128>
85 // CHECK-NEXT: ret <1 x i128>
86 return vec_div(vsi128a, vsi128b);
87 }
88
test_vec_dive_si(void)89 vector signed int test_vec_dive_si(void) {
90 // CHECK: @llvm.ppc.altivec.vdivesw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
91 // CHECK-NEXT: ret <4 x i32>
92 return vec_dive(vsia, vsib);
93 }
94
test_vec_dive_ui(void)95 vector unsigned int test_vec_dive_ui(void) {
96 // CHECK: @llvm.ppc.altivec.vdiveuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
97 // CHECK-NEXT: ret <4 x i32>
98 return vec_dive(vuia, vuib);
99 }
100
test_vec_dive_sll(void)101 vector signed long long test_vec_dive_sll(void) {
102 // CHECK: @llvm.ppc.altivec.vdivesd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
103 // CHECK-NEXT: ret <2 x i64>
104 return vec_dive(vslla, vsllb);
105 }
106
test_vec_dive_ull(void)107 vector unsigned long long test_vec_dive_ull(void) {
108 // CHECK: @llvm.ppc.altivec.vdiveud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
109 // CHECK-NEXT: ret <2 x i64>
110 return vec_dive(vulla, vullb);
111 }
112
test_vec_dive_u128(void)113 vector unsigned __int128 test_vec_dive_u128(void) {
114 // CHECK: @llvm.ppc.altivec.vdiveuq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
115 // CHECK-NEXT: ret <1 x i128>
116 return vec_dive(vui128a, vui128b);
117 }
118
test_vec_dive_s128(void)119 vector signed __int128 test_vec_dive_s128(void) {
120 // CHECK: @llvm.ppc.altivec.vdivesq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
121 // CHECK-NEXT: ret <1 x i128>
122 return vec_dive(vsi128a, vsi128b);
123 }
124
test_vec_mulh_si(void)125 vector signed int test_vec_mulh_si(void) {
126 // CHECK: @llvm.ppc.altivec.vmulhsw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
127 // CHECK-NEXT: ret <4 x i32>
128 return vec_mulh(vsia, vsib);
129 }
130
test_vec_mulh_ui(void)131 vector unsigned int test_vec_mulh_ui(void) {
132 // CHECK: @llvm.ppc.altivec.vmulhuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
133 // CHECK-NEXT: ret <4 x i32>
134 return vec_mulh(vuia, vuib);
135 }
136
test_vec_mulh_sll(void)137 vector signed long long test_vec_mulh_sll(void) {
138 // CHECK: @llvm.ppc.altivec.vmulhsd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
139 // CHECK-NEXT: ret <2 x i64>
140 return vec_mulh(vslla, vsllb);
141 }
142
test_vec_mulh_ull(void)143 vector unsigned long long test_vec_mulh_ull(void) {
144 // CHECK: @llvm.ppc.altivec.vmulhud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
145 // CHECK-NEXT: ret <2 x i64>
146 return vec_mulh(vulla, vullb);
147 }
148
test_vec_mod_si(void)149 vector signed int test_vec_mod_si(void) {
150 // CHECK: srem <4 x i32>
151 // CHECK-NEXT: ret <4 x i32>
152 return vec_mod(vsia, vsib);
153 }
154
test_vec_mod_ui(void)155 vector unsigned int test_vec_mod_ui(void) {
156 // CHECK: urem <4 x i32>
157 // CHECK-NEXT: ret <4 x i32>
158 return vec_mod(vuia, vuib);
159 }
160
test_vec_mod_sll(void)161 vector signed long long test_vec_mod_sll(void) {
162 // CHECK: srem <2 x i64>
163 // CHECK-NEXT: ret <2 x i64>
164 return vec_mod(vslla, vsllb);
165 }
166
test_vec_mod_ull(void)167 vector unsigned long long test_vec_mod_ull(void) {
168 // CHECK: urem <2 x i64>
169 // CHECK-NEXT: ret <2 x i64>
170 return vec_mod(vulla, vullb);
171 }
172
test_xvcvspbf16(vector unsigned char vc)173 vector unsigned char test_xvcvspbf16(vector unsigned char vc) {
174 // CHECK-LABEL: @test_xvcvspbf16(
175 // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> [[VC:%.*]])
176 return __builtin_vsx_xvcvspbf16(vc);
177 }
178
test_xvcvbf16spn(vector unsigned char vc)179 vector unsigned char test_xvcvbf16spn(vector unsigned char vc) {
180 // CHECK-LABEL: @test_xvcvbf16spn(
181 // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> [[VC:%.*]])
182 return __builtin_vsx_xvcvbf16spn(vc);
183 }
184
test_vpdepd(void)185 vector unsigned long long test_vpdepd(void) {
186 // CHECK: @llvm.ppc.altivec.vpdepd(<2 x i64>
187 // CHECK-NEXT: ret <2 x i64>
188 return vec_pdep(vulla, vullb);
189 }
190
test_vpextd(void)191 vector unsigned long long test_vpextd(void) {
192 // CHECK: @llvm.ppc.altivec.vpextd(<2 x i64>
193 // CHECK-NEXT: ret <2 x i64>
194 return vec_pext(vulla, vullb);
195 }
196
test_vec_stril_uc(void)197 vector unsigned char test_vec_stril_uc(void) {
198 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
199 // CHECK-BE-NEXT: ret <16 x i8>
200 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
201 // CHECK-LE-NEXT: ret <16 x i8>
202 return vec_stril(vuca);
203 }
204
test_vec_stril_sc(void)205 vector signed char test_vec_stril_sc(void) {
206 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
207 // CHECK-BE-NEXT: ret <16 x i8>
208 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
209 // CHECK-LE-NEXT: ret <16 x i8>
210 return vec_stril(vsca);
211 }
212
test_vec_stril_us(void)213 vector unsigned short test_vec_stril_us(void) {
214 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
215 // CHECK-BE-NEXT: ret <8 x i16>
216 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
217 // CHECK-LE-NEXT: ret <8 x i16>
218 return vec_stril(vusa);
219 }
220
test_vec_stril_ss(void)221 vector signed short test_vec_stril_ss(void) {
222 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
223 // CHECK-BE-NEXT: ret <8 x i16>
224 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
225 // CHECK-LE-NEXT: ret <8 x i16>
226 return vec_stril(vssa);
227 }
228
test_vec_stril_p_uc(void)229 int test_vec_stril_p_uc(void) {
230 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
231 // CHECK-BE-NEXT: ret i32
232 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
233 // CHECK-LE-NEXT: ret i32
234 return vec_stril_p(vuca);
235 }
236
test_vec_stril_p_sc(void)237 int test_vec_stril_p_sc(void) {
238 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
239 // CHECK-BE-NEXT: ret i32
240 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
241 // CHECK-LE-NEXT: ret i32
242 return vec_stril_p(vsca);
243 }
244
test_vec_stril_p_us(void)245 int test_vec_stril_p_us(void) {
246 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
247 // CHECK-BE-NEXT: ret i32
248 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
249 // CHECK-LE-NEXT: ret i32
250 return vec_stril_p(vusa);
251 }
252
test_vec_stril_p_ss(void)253 int test_vec_stril_p_ss(void) {
254 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
255 // CHECK-BE-NEXT: ret i32
256 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
257 // CHECK-LE-NEXT: ret i32
258 return vec_stril_p(vssa);
259 }
260
test_vec_stril_p_uc_2(vector unsigned char * ptr,int len)261 vector unsigned char test_vec_stril_p_uc_2(vector unsigned char *ptr, int len) {
262 // CHECK-BE: icmp slt i32
263 // CHECK-BE: br i1
264 // CHECK-BE: for.body:
265 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
266 // CHECK-BE: if.then:
267 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
268 // CHECK-BE: ret <16 x i8>
269 // CHECK-LE: icmp slt i32
270 // CHECK-LE: br i1
271 // CHECK-LE: for.body:
272 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
273 // CHECK-LE: if.then:
274 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
275 // CHECK-LE: ret <16 x i8>
276 for (int i = 0; i < len; i++) {
277 if (vec_stril_p(*(ptr + i))) {
278 return vec_stril(*(ptr + i));
279 }
280 }
281 return vec_stril(*(ptr));
282 }
283
test_vec_stril_p_sc_2(vector signed char * ptr,int len)284 vector signed char test_vec_stril_p_sc_2(vector signed char *ptr, int len) {
285 // CHECK-BE: icmp slt i32
286 // CHECK-BE: br i1
287 // CHECK-BE: for.body:
288 // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
289 // CHECK-BE: if.then:
290 // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
291 // CHECK-BE: ret <16 x i8>
292 // CHECK-LE: icmp slt i32
293 // CHECK-LE: br i1
294 // CHECK-LE: for.body:
295 // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
296 // CHECK-LE: if.then:
297 // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
298 // CHECK-LE: ret <16 x i8>
299 for (int i = 0; i < len; i++) {
300 if (vec_stril_p(*(ptr + i))) {
301 return vec_stril(*(ptr + i));
302 }
303 }
304 return vec_stril(*(ptr));
305 }
306
test_vec_stril_p_us_2(vector unsigned short * ptr,int len)307 vector unsigned short test_vec_stril_p_us_2(vector unsigned short *ptr, int len) {
308 // CHECK-BE: icmp slt i32
309 // CHECK-BE: br i1
310 // CHECK-BE: for.body:
311 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
312 // CHECK-BE: if.then:
313 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
314 // CHECK-BE: ret <8 x i16>
315 // CHECK-LE: icmp slt i32
316 // CHECK-LE: br i1
317 // CHECK-LE: for.body:
318 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
319 // CHECK-LE: if.then:
320 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
321 // CHECK-LE: ret <8 x i16>
322 for (int i = 0; i < len; i++) {
323 if (vec_stril_p(*(ptr + i))) {
324 return vec_stril(*(ptr + i));
325 }
326 }
327 return vec_stril(*(ptr));
328 }
329
test_vec_stril_p_ss_2(vector signed short * ptr,int len)330 vector signed short test_vec_stril_p_ss_2(vector signed short *ptr, int len) {
331 // CHECK-BE: icmp slt i32
332 // CHECK-BE: br i1
333 // CHECK-BE: for.body:
334 // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
335 // CHECK-BE: if.then:
336 // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
337 // CHECK-BE: ret <8 x i16>
338 // CHECK-LE: icmp slt i32
339 // CHECK-LE: br i1
340 // CHECK-LE: for.body:
341 // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
342 // CHECK-LE: if.then:
343 // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
344 // CHECK-LE: ret <8 x i16>
345 for (int i = 0; i < len; i++) {
346 if (vec_stril_p(*(ptr + i))) {
347 return vec_stril(*(ptr + i));
348 }
349 }
350 return vec_stril(*(ptr));
351 }
352
test_vec_strir_uc(void)353 vector unsigned char test_vec_strir_uc(void) {
354 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
355 // CHECK-BE-NEXT: ret <16 x i8>
356 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
357 // CHECK-LE-NEXT: ret <16 x i8>
358 return vec_strir(vuca);
359 }
360
test_vec_strir_sc(void)361 vector signed char test_vec_strir_sc(void) {
362 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
363 // CHECK-BE-NEXT: ret <16 x i8>
364 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
365 // CHECK-LE-NEXT: ret <16 x i8>
366 return vec_strir(vsca);
367 }
368
test_vec_strir_us(void)369 vector unsigned short test_vec_strir_us(void) {
370 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
371 // CHECK-BE-NEXT: ret <8 x i16>
372 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
373 // CHECK-LE-NEXT: ret <8 x i16>
374 return vec_strir(vusa);
375 }
376
test_vec_strir_ss(void)377 vector signed short test_vec_strir_ss(void) {
378 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
379 // CHECK-BE-NEXT: ret <8 x i16>
380 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
381 // CHECK-LE-NEXT: ret <8 x i16>
382 return vec_strir(vssa);
383 }
384
test_vec_strir_p_uc(void)385 int test_vec_strir_p_uc(void) {
386 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
387 // CHECK-BE-NEXT: ret i32
388 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
389 // CHECK-LE-NEXT: ret i32
390 return vec_strir_p(vuca);
391 }
392
test_vec_strir_p_sc(void)393 int test_vec_strir_p_sc(void) {
394 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
395 // CHECK-BE-NEXT: ret i32
396 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
397 // CHECK-LE-NEXT: ret i32
398 return vec_strir_p(vsca);
399 }
400
test_vec_strir_p_us(void)401 int test_vec_strir_p_us(void) {
402 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
403 // CHECK-BE-NEXT: ret i32
404 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
405 // CHECK-LE-NEXT: ret i32
406 return vec_strir_p(vusa);
407 }
408
test_vec_strir_p_ss(void)409 int test_vec_strir_p_ss(void) {
410 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
411 // CHECK-BE-NEXT: ret i32
412 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
413 // CHECK-LE-NEXT: ret i32
414 return vec_strir_p(vssa);
415 }
416
test_vec_strir_p_uc_2(vector unsigned char * ptr,int len)417 vector unsigned char test_vec_strir_p_uc_2(vector unsigned char *ptr, int len) {
418 // CHECK-BE: icmp slt i32
419 // CHECK-BE: br i1
420 // CHECK-BE: for.body:
421 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
422 // CHECK-BE: if.then:
423 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
424 // CHECK-BE: ret <16 x i8>
425 // CHECK-LE: icmp slt i32
426 // CHECK-LE: br i1
427 // CHECK-LE: for.body:
428 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
429 // CHECK-LE: if.then:
430 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
431 // CHECK-LE: ret <16 x i8>
432 for (int i = 0; i < len; i++) {
433 if (vec_strir_p(*(ptr + i))) {
434 return vec_strir(*(ptr + i));
435 }
436 }
437 return vec_strir(*(ptr));
438 }
439
test_vec_strir_p_sc_2(vector signed char * ptr,int len)440 vector signed char test_vec_strir_p_sc_2(vector signed char *ptr, int len) {
441 // CHECK-BE: icmp slt i32
442 // CHECK-BE: br i1
443 // CHECK-BE: for.body:
444 // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
445 // CHECK-BE: if.then:
446 // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
447 // CHECK-BE: ret <16 x i8>
448 // CHECK-LE: icmp slt i32
449 // CHECK-LE: br i1
450 // CHECK-LE: for.body:
451 // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
452 // CHECK-LE: if.then:
453 // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
454 // CHECK-LE: ret <16 x i8>
455 for (int i = 0; i < len; i++) {
456 if (vec_strir_p(*(ptr + i))) {
457 return vec_strir(*(ptr + i));
458 }
459 }
460 return vec_strir(*(ptr));
461 }
462
test_vec_strir_p_us_2(vector unsigned short * ptr,int len)463 vector unsigned short test_vec_strir_p_us_2(vector unsigned short *ptr, int len) {
464 // CHECK-BE: icmp slt i32
465 // CHECK-BE: br i1
466 // CHECK-BE: for.body:
467 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
468 // CHECK-BE: if.then:
469 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
470 // CHECK-BE: ret <8 x i16>
471 // CHECK-LE: icmp slt i32
472 // CHECK-LE: br i1
473 // CHECK-LE: for.body:
474 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
475 // CHECK-LE: if.then:
476 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
477 // CHECK-LE: ret <8 x i16>
478 for (int i = 0; i < len; i++) {
479 if (vec_strir_p(*(ptr + i))) {
480 return vec_strir(*(ptr + i));
481 }
482 }
483 return vec_strir(*(ptr));
484 }
485
test_vec_strir_p_ss_2(vector signed short * ptr,int len)486 vector signed short test_vec_strir_p_ss_2(vector signed short *ptr, int len) {
487 // CHECK-BE: icmp slt i32
488 // CHECK-BE: br i1
489 // CHECK-BE: for.body:
490 // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
491 // CHECK-BE: if.then:
492 // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
493 // CHECK-BE: ret <8 x i16>
494 // CHECK-LE: icmp slt i32
495 // CHECK-LE: br i1
496 // CHECK-LE: for.body:
497 // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
498 // CHECK-LE: if.then:
499 // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
500 // CHECK-LE: ret <8 x i16>
501 for (int i = 0; i < len; i++) {
502 if (vec_strir_p(*(ptr + i))) {
503 return vec_strir(*(ptr + i));
504 }
505 }
506 return vec_strir(*(ptr));
507 }
508
test_vec_extractm_uc(void)509 unsigned int test_vec_extractm_uc(void) {
510 // CHECK: @llvm.ppc.altivec.vextractbm(<16 x i8> %{{.+}})
511 // CHECK-NEXT: ret i32
512 return vec_extractm(vuca);
513 }
514
test_vec_extractm_us(void)515 unsigned int test_vec_extractm_us(void) {
516 // CHECK: @llvm.ppc.altivec.vextracthm(<8 x i16> %{{.+}})
517 // CHECK-NEXT: ret i32
518 return vec_extractm(vusa);
519 }
520
test_vec_extractm_ui(void)521 unsigned int test_vec_extractm_ui(void) {
522 // CHECK: @llvm.ppc.altivec.vextractwm(<4 x i32> %{{.+}})
523 // CHECK-NEXT: ret i32
524 return vec_extractm(vuia);
525 }
526
test_vec_extractm_ull(void)527 unsigned int test_vec_extractm_ull(void) {
528 // CHECK: @llvm.ppc.altivec.vextractdm(<2 x i64> %{{.+}})
529 // CHECK-NEXT: ret i32
530 return vec_extractm(vulla);
531 }
532
test_vec_extractm_u128(void)533 unsigned int test_vec_extractm_u128(void) {
534 // CHECK: @llvm.ppc.altivec.vextractqm(<1 x i128> %{{.+}})
535 // CHECK-NEXT: ret i32
536 return vec_extractm(vui128a);
537 }
538
test_vcfuged(void)539 vector unsigned long long test_vcfuged(void) {
540 // CHECK: @llvm.ppc.altivec.vcfuged(<2 x i64>
541 // CHECK-NEXT: ret <2 x i64>
542 return vec_cfuge(vulla, vullb);
543 }
544
test_vec_expandm_uc(void)545 vector unsigned char test_vec_expandm_uc(void) {
546 // CHECK: @llvm.ppc.altivec.vexpandbm(<16 x i8> %{{.+}})
547 // CHECK-NEXT: ret <16 x i8>
548 return vec_expandm(vuca);
549 }
550
test_vec_expandm_us(void)551 vector unsigned short test_vec_expandm_us(void) {
552 // CHECK: @llvm.ppc.altivec.vexpandhm(<8 x i16> %{{.+}})
553 // CHECK-NEXT: ret <8 x i16>
554 return vec_expandm(vusa);
555 }
556
test_vec_expandm_ui(void)557 vector unsigned int test_vec_expandm_ui(void) {
558 // CHECK: @llvm.ppc.altivec.vexpandwm(<4 x i32> %{{.+}})
559 // CHECK-NEXT: ret <4 x i32>
560 return vec_expandm(vuia);
561 }
562
test_vec_expandm_ull(void)563 vector unsigned long long test_vec_expandm_ull(void) {
564 // CHECK: @llvm.ppc.altivec.vexpanddm(<2 x i64> %{{.+}})
565 // CHECK-NEXT: ret <2 x i64>
566 return vec_expandm(vulla);
567 }
568
test_vec_expandm_u128(void)569 vector unsigned __int128 test_vec_expandm_u128(void) {
570 // CHECK: @llvm.ppc.altivec.vexpandqm(<1 x i128> %{{.+}})
571 // CHECK-NEXT: ret <1 x i128>
572 return vec_expandm(vui128a);
573 }
574
test_vec_cntm_uc(void)575 unsigned long long test_vec_cntm_uc(void) {
576 // CHECK: @llvm.ppc.altivec.vcntmbb(<16 x i8> %{{.+}}, i32
577 // CHECK-NEXT: ret i64
578 return vec_cntm(vuca, 1);
579 }
580
test_vec_cntm_us(void)581 unsigned long long test_vec_cntm_us(void) {
582 // CHECK: @llvm.ppc.altivec.vcntmbh(<8 x i16> %{{.+}}, i32
583 // CHECK-NEXT: ret i64
584 return vec_cntm(vusa, 0);
585 }
586
test_vec_cntm_ui(void)587 unsigned long long test_vec_cntm_ui(void) {
588 // CHECK: @llvm.ppc.altivec.vcntmbw(<4 x i32> %{{.+}}, i32
589 // CHECK-NEXT: ret i64
590 return vec_cntm(vuia, 1);
591 }
592
test_vec_cntm_ull(void)593 unsigned long long test_vec_cntm_ull(void) {
594 // CHECK: @llvm.ppc.altivec.vcntmbd(<2 x i64> %{{.+}}, i32
595 // CHECK-NEXT: ret i64
596 return vec_cntm(vulla, 0);
597 }
598
test_vec_genbm(void)599 vector unsigned char test_vec_genbm(void) {
600 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
601 // CHECK-NEXT: ret <16 x i8>
602 return vec_genbm(ulla);
603 }
604
test_vec_genbm_imm(void)605 vector unsigned char test_vec_genbm_imm(void) {
606 // CHECK: store i64 1
607 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
608 // CHECK-NEXT: ret <16 x i8>
609 return vec_genbm(1);
610 }
611
test_vec_genbm_imm2(void)612 vector unsigned char test_vec_genbm_imm2(void) {
613 // CHECK: store i64 255
614 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
615 // CHECK-NEXT: ret <16 x i8>
616 return vec_genbm(255);
617 }
618
test_vec_genbm_imm3(void)619 vector unsigned char test_vec_genbm_imm3(void) {
620 // CHECK: store i64 65535
621 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
622 // CHECK-NEXT: ret <16 x i8>
623 return vec_genbm(65535);
624 }
625
test_vec_genbm_imm4(void)626 vector unsigned char test_vec_genbm_imm4(void) {
627 // CHECK: store i64 65536
628 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
629 // CHECK-NEXT: ret <16 x i8>
630 return vec_genbm(65536);
631 }
632
test_vec_genbm_imm5(void)633 vector unsigned char test_vec_genbm_imm5(void) {
634 // CHECK: store i64 65546
635 // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
636 // CHECK-NEXT: ret <16 x i8>
637 return vec_genbm(65546);
638 }
639
test_vec_genhm(void)640 vector unsigned short test_vec_genhm(void) {
641 // CHECK: @llvm.ppc.altivec.mtvsrhm(i64 %{{.+}})
642 // CHECK-NEXT: ret <8 x i16>
643 return vec_genhm(ulla);
644 }
645
test_vec_genwm(void)646 vector unsigned int test_vec_genwm(void) {
647 // CHECK: @llvm.ppc.altivec.mtvsrwm(i64 %{{.+}})
648 // CHECK-NEXT: ret <4 x i32>
649 return vec_genwm(ulla);
650 }
651
test_vec_gendm(void)652 vector unsigned long long test_vec_gendm(void) {
653 // CHECK: @llvm.ppc.altivec.mtvsrdm(i64 %{{.+}})
654 // CHECK-NEXT: ret <2 x i64>
655 return vec_gendm(ulla);
656 }
657
test_vec_genqm(void)658 vector unsigned __int128 test_vec_genqm(void) {
659 // CHECK: @llvm.ppc.altivec.mtvsrqm(i64 %{{.+}})
660 // CHECK-NEXT: ret <1 x i128>
661 return vec_genqm(ulla);
662 }
663
test_vgnb_1(void)664 unsigned long long test_vgnb_1(void) {
665 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 2)
666 // CHECK-NEXT: ret i64
667 return vec_gnb(vui128a, 2);
668 }
669
test_vgnb_2(void)670 unsigned long long test_vgnb_2(void) {
671 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 7)
672 // CHECK-NEXT: ret i64
673 return vec_gnb(vui128a, 7);
674 }
675
test_vgnb_3(void)676 unsigned long long test_vgnb_3(void) {
677 // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 5)
678 // CHECK-NEXT: ret i64
679 return vec_gnb(vui128a, 5);
680 }
681
test_xxeval_uc(void)682 vector unsigned char test_xxeval_uc(void) {
683 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 0)
684 // CHECK: ret <16 x i8>
685 return vec_ternarylogic(vuca, vucb, vucc, 0);
686 }
687
test_xxeval_us(void)688 vector unsigned short test_xxeval_us(void) {
689 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 255)
690 // CHECK: ret <8 x i16>
691 return vec_ternarylogic(vusa, vusb, vusc, 255);
692 }
693
test_xxeval_ui(void)694 vector unsigned int test_xxeval_ui(void) {
695 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 150)
696 // CHECK: ret <4 x i32>
697 return vec_ternarylogic(vuia, vuib, vuic, 150);
698 }
699
test_xxeval_ull(void)700 vector unsigned long long test_xxeval_ull(void) {
701 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 1)
702 // CHECK: ret <2 x i64>
703 return vec_ternarylogic(vulla, vullb, vullc, 1);
704 }
705
test_xxeval_ui128(void)706 vector unsigned __int128 test_xxeval_ui128(void) {
707 // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 246)
708 // CHECK: ret <1 x i128>
709 return vec_ternarylogic(vui128a, vui128b, vui128c, 246);
710 }
711
test_xxgenpcvbm(void)712 vector unsigned char test_xxgenpcvbm(void) {
713 // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
714 // CHECK-NEXT: ret <16 x i8>
715 return vec_genpcvm(vuca, 0);
716 }
717
test_xxgenpcvhm(void)718 vector unsigned short test_xxgenpcvhm(void) {
719 // CHECK: @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %{{.+}}, i32
720 // CHECK-NEXT: ret <8 x i16>
721 return vec_genpcvm(vusa, 0);
722 }
723
test_xxgenpcvwm(void)724 vector unsigned int test_xxgenpcvwm(void) {
725 // CHECK: @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %{{.+}}, i32
726 // CHECK-NEXT: ret <4 x i32>
727 return vec_genpcvm(vuia, 0);
728 }
729
test_xxgenpcvdm(void)730 vector unsigned long long test_xxgenpcvdm(void) {
731 // CHECK: @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %{{.+}}, i32
732 // CHECK-NEXT: ret <2 x i64>
733 return vec_genpcvm(vulla, 0);
734 }
735
test_vec_clr_first_sc(void)736 vector signed char test_vec_clr_first_sc(void) {
737 // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
738 // CHECK-BE-NEXT: ret <16 x i8>
739 // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
740 // CHECK-LE-NEXT: ret <16 x i8>
741 return vec_clr_first(vsca, uia);
742 }
743
test_vec_clr_first_uc(void)744 vector unsigned char test_vec_clr_first_uc(void) {
745 // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
746 // CHECK-BE-NEXT: ret <16 x i8>
747 // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
748 // CHECK-LE-NEXT: ret <16 x i8>
749 return vec_clr_first(vuca, uia);
750 }
751
test_vec_clr_last_sc(void)752 vector signed char test_vec_clr_last_sc(void) {
753 // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
754 // CHECK-BE-NEXT: ret <16 x i8>
755 // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
756 // CHECK-LE-NEXT: ret <16 x i8>
757 return vec_clr_last(vsca, uia);
758 }
759
test_vec_clr_last_uc(void)760 vector unsigned char test_vec_clr_last_uc(void) {
761 // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
762 // CHECK-BE-NEXT: ret <16 x i8>
763 // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
764 // CHECK-LE-NEXT: ret <16 x i8>
765 return vec_clr_last(vuca, uia);
766 }
767
test_vclzdm(void)768 vector unsigned long long test_vclzdm(void) {
769 // CHECK: @llvm.ppc.altivec.vclzdm(<2 x i64>
770 // CHECK-NEXT: ret <2 x i64>
771 return vec_cntlzm(vulla, vullb);
772 }
773
test_vctzdm(void)774 vector unsigned long long test_vctzdm(void) {
775 // CHECK: @llvm.ppc.altivec.vctzdm(<2 x i64>
776 // CHECK-NEXT: ret <2 x i64>
777 return vec_cnttzm(vulla, vullb);
778 }
779
test_vec_sldb_sc(void)780 vector signed char test_vec_sldb_sc(void) {
781 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
782 // CHECK-NEXT: ret <16 x i8>
783 return vec_sldb(vsca, vscb, 0);
784 }
785
test_vec_sldb_uc(void)786 vector unsigned char test_vec_sldb_uc(void) {
787 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
788 // CHECK-NEXT: ret <16 x i8>
789 return vec_sldb(vuca, vucb, 1);
790 }
791
test_vec_sldb_ss(void)792 vector signed short test_vec_sldb_ss(void) {
793 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
794 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
795 // CHECK-NEXT: ret <8 x i16>
796 return vec_sldb(vssa, vssb, 2);
797 }
798
test_vec_sldb_us(void)799 vector unsigned short test_vec_sldb_us(void) {
800 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
801 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
802 // CHECK-NEXT: ret <8 x i16>
803 return vec_sldb(vusa, vusb, 3);
804 }
805
test_vec_sldb_si(void)806 vector signed int test_vec_sldb_si(void) {
807 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
808 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
809 // CHECK-NEXT: ret <4 x i32>
810 return vec_sldb(vsia, vsib, 4);
811 }
812
test_vec_sldb_ui(void)813 vector unsigned int test_vec_sldb_ui(void) {
814 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
815 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
816 // CHECK-NEXT: ret <4 x i32>
817 return vec_sldb(vuia, vuib, 5);
818 }
819
test_vec_sldb_sll(void)820 vector signed long long test_vec_sldb_sll(void) {
821 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
822 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
823 // CHECK-NEXT: ret <2 x i64>
824 return vec_sldb(vslla, vsllb, 6);
825 }
826
test_vec_sldb_ull(void)827 vector unsigned long long test_vec_sldb_ull(void) {
828 // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
829 // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
830 // CHECK-NEXT: ret <2 x i64>
831 return vec_sldb(vulla, vullb, 7);
832 }
833
// vec_srdb -- Vector Shift Right Double by Bit Immediate (vsrdbi). Every
// element width funnels through the <16 x i8> intrinsic; non-char results
// are bitcast back to the caller's element type before the return.
// NOTE(review): shift counts 8/9/10 below are checked against immediates
// 0/1/2, so out-of-range counts appear to be masked into [0,7] (as the
// splati_ins comment later in this file describes for its own operand) --
// confirm against the builtin's range handling.
vector signed char test_vec_srdb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vsca, vscb, 8);
}

vector unsigned char test_vec_srdb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vuca, vucb, 9);
}

vector signed short test_vec_srdb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vssa, vssb, 10);
}

vector unsigned short test_vec_srdb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vusa, vusb, 3);
}

vector signed int test_vec_srdb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vsia, vsib, 4);
}

vector unsigned int test_vec_srdb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vuia, vuib, 5);
}

vector signed long long test_vec_srdb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_srdb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vulla, vullb, 7);
}
887
// vec_permx -- Vector Permute Extended (xxpermx). The intrinsic operates on
// <16 x i8> operands with a 3-bit immediate; results for wider element
// types (including float/double) are bitcast back to the caller's type.
vector signed char test_vec_permx_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vsca, vscb, vucc, 0);
}

vector unsigned char test_vec_permx_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vuca, vucb, vucc, 1);
}

vector signed short test_vec_permx_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vssa, vssb, vucc, 2);
}

vector unsigned short test_vec_permx_us(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vusa, vusb, vucc, 3);
}

vector signed int test_vec_permx_si(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vsia, vsib, vucc, 4);
}

vector unsigned int test_vec_permx_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vuia, vuib, vucc, 5);
}

vector signed long long test_vec_permx_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vslla, vsllb, vucc, 6);
}

vector unsigned long long test_vec_permx_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vulla, vullb, vucc, 7);
}

vector float test_vec_permx_f(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_permx(vfa, vfb, vucc, 0);
}

vector double test_vec_permx_d(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_permx(vda, vdb, vucc, 1);
}
955
// vec_blendv -- Vector Blend Variable. Each element width selects the
// matching xxblendv{b,h,w,d} intrinsic; float uses the word form and double
// the doubleword form, with a bitcast back to the floating-point type.
vector signed char test_vec_blend_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vsca, vscb, vucc);
}

vector unsigned char test_vec_blend_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vuca, vucb, vucc);
}

vector signed short test_vec_blend_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vssa, vssb, vusc);
}

vector unsigned short test_vec_blend_us(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vusa, vusb, vusc);
}

vector signed int test_vec_blend_si(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vsia, vsib, vuic);
}

vector unsigned int test_vec_blend_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vuia, vuib, vuic);
}

vector signed long long test_vec_blend_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vslla, vsllb, vullc);
}

vector unsigned long long test_vec_blend_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vulla, vullb, vullc);
}

vector float test_vec_blend_f(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_blendv(vfa, vfb, vuic);
}

vector double test_vec_blend_d(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_blendv(vda, vdb, vullc);
}
1017
// vec_replace_elt -- replace one element via vinsw (32-bit) / vinsd
// (64-bit). The intrinsic immediate is a byte offset, so the same element
// index produces mirrored offsets on BE vs. LE (e.g. element 0 of a word
// vector is byte 0 on BE and byte 12 on LE). float/double payloads are
// bitcast to the matching integer width around the insert.
vector signed int test_vec_replace_elt_si(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 12
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_replace_elt(vsia, sia, 0);
}

vector unsigned int test_vec_replace_elt_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_replace_elt(vuia, uia, 1);
}

vector float test_vec_replace_elt_f(void) {
  // CHECK-BE: bitcast float %{{.+}} to i32
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-BE-NEXT: ret <4 x float>
  // CHECK-LE: bitcast float %{{.+}} to i32
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-LE-NEXT: ret <4 x float>
  return vec_replace_elt(vfa, fa, 2);
}

vector signed long long test_vec_replace_elt_sll(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_replace_elt(vslla, llb, 0);
}

vector unsigned long long test_vec_replace_elt_ull(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_replace_elt(vulla, ulla, 0);
}

vector double test_vec_replace_elt_d(void) {
  // CHECK-BE: bitcast double %{{.+}} to i64
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-BE-NEXT: ret <2 x double>
  // CHECK-LE: bitcast double %{{.+}} to i64
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-LE-NEXT: ret <2 x double>
  return vec_replace_elt(vda, da, 1);
}
1073
// vec_replace_unaligned -- like vec_replace_elt but the third argument is a
// raw byte offset (not an element index), and the result is always returned
// as vector unsigned char. The vins{w,d} offsets are endian-mirrored except
// where the requested offset is symmetric (e.g. byte 6 of a word insert).
vector unsigned char test_vec_replace_unaligned_si(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 6
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 6
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vsia, sia, 6);
}

vector unsigned char test_vec_replace_unaligned_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 4
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vuia, uia, 8);
}

vector unsigned char test_vec_replace_unaligned_f(void) {
  // CHECK-BE: bitcast float %{{.+}} to i32
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 12
  // CHECK-BE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: bitcast float %{{.+}} to i32
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsw(<4 x i32> %{{.+}}, i32 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <4 x i32> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vfa, fa, 12);
}

vector unsigned char test_vec_replace_unaligned_sll(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 6
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 2
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vslla, llb, 6);
}

vector unsigned char test_vec_replace_unaligned_ull(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 7
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 1
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vulla, ulla, 7);
}

vector unsigned char test_vec_replace_unaligned_d(void) {
  // CHECK-BE: bitcast double %{{.+}} to i64
  // CHECK-BE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 8
  // CHECK-BE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: bitcast double %{{.+}} to i64
  // CHECK-LE-NEXT: @llvm.ppc.altivec.vinsd(<2 x i64> %{{.+}}, i64 %{{.+}}, i32 0
  // CHECK-LE-NEXT: bitcast <2 x i64> %{{.*}} to <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_replace_unaligned(vda, da, 8);
}
1137
// vec_insertl -- insert at a variable (runtime) index, counting from the
// "low" end. BE lowers to the left-index intrinsics (vins*lx) and LE to the
// right-index forms (vins*rx); the *v* variants take a vector payload.
vector unsigned char test_vec_insertl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(uca, vuca, uia);
}

vector unsigned short test_vec_insertl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(usa, vusa, uia);
}

vector unsigned int test_vec_insertl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(uib, vuia, uia);
}

vector unsigned long long test_vec_insertl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_insertl(ulla, vulla, uia);
}

vector unsigned char test_vec_insertl_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(vuca, vucb, uia);
}

vector unsigned short test_vec_insertl_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(vusa, vusb, uia);
}

vector unsigned int test_vec_insertl_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(vuia, vuib, uia);
}
1193
// vec_inserth -- insert at a variable index counting from the "high" end;
// the exact mirror of vec_insertl: BE uses the right-index intrinsics
// (vins*rx) and LE the left-index forms (vins*lx).
vector unsigned char test_vec_inserth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(uca, vuca, uia);
}

vector unsigned short test_vec_inserth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(usa, vusa, uia);
}

vector unsigned int test_vec_inserth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(uib, vuia, uia);
}

vector unsigned long long test_vec_inserth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_inserth(ulla, vulla, uia);
}

vector unsigned char test_vec_inserth_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(vuca, vucb, uia);
}

vector unsigned short test_vec_inserth_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(vusa, vusb, uia);
}

vector unsigned int test_vec_inserth_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(vuia, vuib, uia);
}
1249
// vec_extractl -- extract a doubleword via the variable-index vextd*vlx/
// vextd*vrx intrinsics. On BE the raw result additionally has its halves
// rearranged with a vperm (hence the extra bitcast/vperm/bitcast sequence);
// LE returns the intrinsic result directly.
vector unsigned long long test_vec_extractl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuca, vucb, uia);
}

vector unsigned long long test_vec_extractl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vusa, vusb, uia);
}

vector unsigned long long test_vec_extractl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuia, vuib, uia);
}

vector unsigned long long test_vec_extractl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vulla, vullb, uia);
}
1297
// vec_extracth -- mirror of vec_extractl: BE uses the right-index
// intrinsics (vextd*vrx, plus the same vperm fix-up), LE the left-index
// forms (vextd*vlx) with the result returned directly.
vector unsigned long long test_vec_extracth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vuca, vucb, uia);
}

vector unsigned long long test_vec_extracth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vusa, vusb, uia);
}

vector unsigned long long test_vec_extracth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vuia, vuib, uia);
}

vector unsigned long long test_vec_extracth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extracth(vulla, vullb, uia);
}
1345
// vec_splati / vec_splatid -- splat an immediate across the vector. The
// integer/float forms fold to constant vectors; vec_splatid takes a float
// argument, extends it to double (fpext below), and splats it via
// insertelement + shufflevector. Same IR either endianness.
vector signed int test_vec_vec_splati_si(void) {
  // CHECK: ret <4 x i32> <i32 -17, i32 -17, i32 -17, i32 -17>
  return vec_splati(-17);
}

vector unsigned int test_vec_vec_splati_ui(void) {
  // CHECK: ret <4 x i32> <i32 16, i32 16, i32 16, i32 16>
  return vec_splati(16U);
}

vector float test_vec_vec_splati_f(void) {
  // CHECK: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  return vec_splati(1.0f);
}

vector double test_vec_vec_splatid(void) {
  // CHECK-BE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-BE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1:%.+]], i32 0
  // CHECK-BE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> poison, <2 x i32> zeroinitialize
  // CHECK-BE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> poison, <2 x i32> zeroinitialize
  // CHECK-BE-NEXT: ret <2 x double> [[T3:%.+]]
  // CHECK-LE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-LE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1:%.+]], i32 0
  // CHECK-LE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2:%.+]], <2 x double> poison, <2 x i32> zeroinitialize
  // CHECK-LE-NEXT: ret <2 x double> [[T3:%.+]]
  return vec_splatid(1.0);
}
1372
// vec_splati_ins -- insert a scalar at element IX and IX+2 of the vector
// (two insertelements). The index argument is masked with `and i32 .., 1`
// to stay in [0,1]; on LE the element positions are mirrored via
// `sub i32 1, ..` / `sub i32 3, ..`, while BE adds 2 for the second slot.
vector signed int test_vec_vec_splati_ins_si(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vsia, 0, -17);
}

vector unsigned int test_vec_vec_splati_ins_ui(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vuia, 1, 16U);
}

vector float test_vec_vec_splati_ins_f(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x float>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x float>
  return vec_splati_ins(vfa, 0, 1.0f);
}

// In this test case, the second argument of vec_splati_ins is outside of the
// expected range [0,1]. A mask of 0x01 is applied to obtain an in-range value
// for the second argument.
vector signed int test_vec_vec_splati_ins_range(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vsia, 2, -17);
}
1435
test_vec_xst_trunc_sc(vector signed __int128 __a,signed long long __b,signed char * __c)1436 void test_vec_xst_trunc_sc(vector signed __int128 __a, signed long long __b,
1437 signed char *__c) {
1438 // CHECK: store i8 %{{.+}}, i8* %{{.+}}, align 1
1439 vec_xst_trunc(__a, __b, __c);
1440 }
1441
test_vec_xst_trunc_uc(vector unsigned __int128 __a,signed long long __b,unsigned char * __c)1442 void test_vec_xst_trunc_uc(vector unsigned __int128 __a, signed long long __b,
1443 unsigned char *__c) {
1444 // CHECK: store i8 %{{.+}}, i8* %{{.+}}, align 1
1445 vec_xst_trunc(__a, __b, __c);
1446 }
1447
test_vec_xst_trunc_ss(vector signed __int128 __a,signed long long __b,signed short * __c)1448 void test_vec_xst_trunc_ss(vector signed __int128 __a, signed long long __b,
1449 signed short *__c) {
1450 // CHECK: store i16 %{{.+}}, i16* %{{.+}}, align 2
1451 vec_xst_trunc(__a, __b, __c);
1452 }
1453
test_vec_xst_trunc_us(vector unsigned __int128 __a,signed long long __b,unsigned short * __c)1454 void test_vec_xst_trunc_us(vector unsigned __int128 __a, signed long long __b,
1455 unsigned short *__c) {
1456 // CHECK: store i16 %{{.+}}, i16* %{{.+}}, align 2
1457 vec_xst_trunc(__a, __b, __c);
1458 }
1459
test_vec_xst_trunc_si(vector signed __int128 __a,signed long long __b,signed int * __c)1460 void test_vec_xst_trunc_si(vector signed __int128 __a, signed long long __b,
1461 signed int *__c) {
1462 // CHECK: store i32 %{{.+}}, i32* %{{.+}}, align 4
1463 vec_xst_trunc(__a, __b, __c);
1464 }
1465
test_vec_xst_trunc_ui(vector unsigned __int128 __a,signed long long __b,unsigned int * __c)1466 void test_vec_xst_trunc_ui(vector unsigned __int128 __a, signed long long __b,
1467 unsigned int *__c) {
1468 // CHECK: store i32 %{{.+}}, i32* %{{.+}}, align 4
1469 vec_xst_trunc(__a, __b, __c);
1470 }
1471
test_vec_xst_trunc_sll(vector signed __int128 __a,signed long long __b,signed long long * __c)1472 void test_vec_xst_trunc_sll(vector signed __int128 __a, signed long long __b,
1473 signed long long *__c) {
1474 // CHECK: store i64 %{{.+}}, i64* %{{.+}}, align 8
1475 vec_xst_trunc(__a, __b, __c);
1476 }
1477
test_vec_xst_trunc_ull(vector unsigned __int128 __a,signed long long __b,unsigned long long * __c)1478 void test_vec_xst_trunc_ull(vector unsigned __int128 __a, signed long long __b,
1479 unsigned long long *__c) {
1480 // CHECK: store i64 %{{.+}}, i64* %{{.+}}, align 8
1481 vec_xst_trunc(__a, __b, __c);
1482 }
1483
// Quadword shifts: vec_sl / vec_sr / vec_sra on __int128 vectors lower to
// plain IR shl / lshr / ashr on <1 x i128> (no target intrinsic). Note the
// shift kind follows the builtin, not the first operand's signedness --
// vec_sra on an unsigned vector still emits ashr.
vector unsigned __int128 test_vec_slq_unsigned (void) {
  // CHECK-LABEL: test_vec_slq_unsigned
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128> %{{.+}}
  return vec_sl(vui128a, vui128b);
}

vector signed __int128 test_vec_slq_signed (void) {
  // CHECK-LABEL: test_vec_slq_signed
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sl(vi128a, vui128a);
}

vector unsigned __int128 test_vec_srq_unsigned (void) {
  // CHECK-LABEL: test_vec_srq_unsigned
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vui128a, vui128b);
}

vector signed __int128 test_vec_srq_signed (void) {
  // CHECK-LABEL: test_vec_srq_signed
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vi128a, vui128a);
}

vector unsigned __int128 test_vec_sraq_unsigned (void) {
  // CHECK-LABEL: test_vec_sraq_unsigned
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vui128a, vui128b);
}

vector signed __int128 test_vec_sraq_signed (void) {
  // CHECK-LABEL: test_vec_sraq_signed
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vi128a, vui128a);
}
1525
1526
// vec_test_lsbb_all_{ones,zeros} -- test the least-significant bit of each
// byte via xvtlsbb; the i32 immediate selects which CR bit is returned
// (1 for the all-ones test, 0 for the all-zeros test).
int test_vec_test_lsbb_all_ones(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_ones(vuca);
}

int test_vec_test_lsbb_all_zeros(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_zeros(vuca);
}
1538
// vec_mule on unsigned doublewords: the "even" elements differ by endianness,
// so the lowering is vmuleud on big-endian and vmuloud on little-endian.
vector unsigned __int128 test_vec_mule_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vulla, vullb);
}
1546
// vec_mule on signed doublewords: vmulesd on big-endian, vmulosd on
// little-endian (endianness flips which lane is "even").
vector signed __int128 test_vec_mule_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vslla, vsllb);
}
1554
// vec_mulo on unsigned doublewords: mirror image of vec_mule — vmuloud on
// big-endian, vmuleud on little-endian.
vector unsigned __int128 test_vec_mulo_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vulla, vullb);
}
1562
// vec_mulo on signed doublewords: vmulosd on big-endian, vmulesd on
// little-endian.
vector signed __int128 test_vec_mulo_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vslla, vsllb);
}
1570
// Verify vec_msumc (multiply-sum with carry) lowers to the vmsumcud intrinsic;
// same lowering on both endiannesses.
vector unsigned __int128 test_vec_msumc_u128(void) {
  // CHECK: @llvm.ppc.altivec.vmsumcud(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_msumc(vulla, vullb, vui128a);
}
1576
// vec_xl_sext from a signed char pointer: scalar i8 load sign-extended into a
// <1 x i128> result.
vector signed __int128 test_vec_xl_sext_i8(void) {
  // CHECK: load i8
  // CHECK: sext i8
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, cap);
}
1583
// vec_xl_sext from a signed short pointer: scalar i16 load sign-extended into
// a <1 x i128> result.
vector signed __int128 test_vec_xl_sext_i16(void) {
  // CHECK: load i16
  // CHECK: sext i16
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, sap);
}
1590
// vec_xl_sext from a signed int pointer: scalar i32 load sign-extended into a
// <1 x i128> result.
vector signed __int128 test_vec_xl_sext_i32(void) {
  // CHECK: load i32
  // CHECK: sext i32
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, iap);
}
1597
// vec_xl_sext from a signed long long pointer: scalar i64 load sign-extended
// into a <1 x i128> result.
vector signed __int128 test_vec_xl_sext_i64(void) {
  // CHECK: load i64
  // CHECK: sext i64
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, llap);
}
1604
// vec_xl_zext from an unsigned char pointer: scalar i8 load zero-extended into
// a <1 x i128> result.
vector unsigned __int128 test_vec_xl_zext_i8(void) {
  // CHECK: load i8
  // CHECK: zext i8
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ucap);
}
1611
// vec_xl_zext from an unsigned short pointer: scalar i16 load zero-extended
// into a <1 x i128> result.
vector unsigned __int128 test_vec_xl_zext_i16(void) {
  // CHECK: load i16
  // CHECK: zext i16
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, usap);
}
1618
// vec_xl_zext from an unsigned int pointer: scalar i32 load zero-extended into
// a <1 x i128> result.
vector unsigned __int128 test_vec_xl_zext_i32(void) {
  // CHECK: load i32
  // CHECK: zext i32
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, uiap);
}
1625
// vec_xl_zext from an unsigned long long pointer: scalar i64 load
// zero-extended into a <1 x i128> result.
vector unsigned __int128 test_vec_xl_zext_i64(void) {
  // CHECK: load i64
  // CHECK: zext i64
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ullap);
}
1632
// Verify vec_signextq (doubleword -> quadword sign extension) lowers to the
// vextsd2q intrinsic.
vector signed __int128 test_vec_signextq_s128(void) {
  // CHECK: @llvm.ppc.altivec.vextsd2q(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_signextq(vslla);
}
1638
// Verify vec_mod on unsigned __int128 lowers to a plain urem instruction.
vector unsigned __int128 test_vec_mod_u128(void) {
  // CHECK: urem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vui128a, vui128b);
}
1644
// Verify vec_mod on signed __int128 lowers to a plain srem instruction.
vector signed __int128 test_vec_mod_s128(void) {
  // CHECK: srem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vsi128a, vsi128b);
}
1650
// Verify vec_cmpeq on signed __int128 lowers to the vcmpequq intrinsic.
vector bool __int128 test_vec_cmpeq_s128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vsi128a, vsi128b);
}
1657
// Verify vec_cmpeq on unsigned __int128 lowers to the same vcmpequq intrinsic
// (equality ignores signedness).
vector bool __int128 test_vec_cmpeq_u128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vui128a, vui128b);
}
1664
// Verify vec_cmpeq accepts vector bool __int128 operands and lowers to
// vcmpequq.
vector bool __int128 test_vec_cmpeq_bool_int128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_bool_int128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vbi128a, vbi128b);
}
1671
// vec_cmpne on signed __int128: implemented as vcmpequq followed by a bitwise
// negation (xor with all-ones).
vector bool __int128 test_vec_cmpne_s128(void) {
  // CHECK-LABEL: @test_vec_cmpne_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128> %neg.i
  return vec_cmpne(vsi128a, vsi128b);
}
1679
// vec_cmpne on unsigned __int128: vcmpequq followed by negation, same as the
// signed case.
vector bool __int128 test_vec_cmpne_u128(void) {
  // CHECK-LABEL: @test_vec_cmpne_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpne(vui128a, vui128b);
}
1687
// vec_cmpne on vector bool __int128 operands: vcmpequq followed by negation.
vector bool __int128 test_vec_cmpne_bool_int128(void) {
  // CHECK-LABEL: @test_vec_cmpne_bool_int128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpne(vbi128a, vbi128b);
}
1695
// Verify vec_cmpgt on signed __int128 lowers to the vcmpgtsq intrinsic.
vector bool __int128 test_vec_cmpgt_s128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vsi128a, vsi128b);
}
1702
// Verify vec_cmpgt on unsigned __int128 lowers to the vcmpgtuq intrinsic.
vector bool __int128 test_vec_cmpgt_u128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vui128a, vui128b);
}
1709
// Verify vec_cmplt on signed __int128 is also expressed via the vcmpgtsq
// intrinsic (no separate less-than intrinsic is used).
vector bool __int128 test_vec_cmplt_s128(void) {
  // CHECK-LABEL: @test_vec_cmplt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vsi128a, vsi128b);
}
1716
// Verify vec_cmplt on unsigned __int128 is expressed via the vcmpgtuq
// intrinsic.
vector bool __int128 test_vec_cmplt_u128(void) {
  // CHECK-LABEL: @test_vec_cmplt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vui128a, vui128b);
}
1723
// vec_cmpge on signed __int128: vcmpgtsq followed by negation (ge == not lt).
vector bool __int128 test_vec_cmpge_s128(void) {
  // CHECK-LABEL: @test_vec_cmpge_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vsi128a, vsi128b);
}
1731
// vec_cmpge on unsigned __int128: vcmpgtuq followed by negation.
vector bool __int128 test_vec_cmpge_u128(void) {
  // CHECK-LABEL: @test_vec_cmpge_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: %neg.i = xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vui128a, vui128b);
}
1739
// vec_cmple on signed __int128: vcmpgtsq followed by negation (note the
// doubly-inlined %neg.i.i name).
vector signed __int128 test_vec_cmple_s128_doc_marker(void);
vector bool __int128 test_vec_cmple_s128(void) {
  // CHECK-LABEL: @test_vec_cmple_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: %neg.i.i = xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vsi128a, vsi128b);
}
1747
// vec_cmple on unsigned __int128: vcmpgtuq followed by negation.
vector bool __int128 test_vec_cmple_u128(void) {
  // CHECK-LABEL: @test_vec_cmple_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: %neg.i.i = xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vui128a, vui128b);
}
1755
// Verify vec_any_eq on unsigned __int128 lowers to the vcmpequq.p predicate
// intrinsic with predicate operand 1.
int test_vec_any_eq_u128(void) {
  // CHECK-LABEL: @test_vec_any_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vui128a, vui128b);
}
1762
// Verify vec_any_eq on signed __int128 lowers to vcmpequq.p with predicate 1.
int test_vec_any_eq_s128(void) {
  // CHECK-LABEL: @test_vec_any_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vsi128a, vsi128b);
}
1769
// Verify vec_any_eq on vector bool __int128 lowers to vcmpequq.p with
// predicate 1.
int test_vec_any_eq_bool_int128(void) {
  // CHECK-LABEL: @test_vec_any_eq_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vbi128a, vbi128b);
}
1776
// Verify vec_any_ne on signed __int128 lowers to vcmpequq.p with predicate 3.
int test_vec_any_ne_s128(void) {
  // CHECK-LABEL: @test_vec_any_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vsi128a, vsi128b);
}
1783
// Verify vec_any_ne on unsigned __int128 lowers to vcmpequq.p with
// predicate 3.
int test_vec_any_ne_u128(void) {
  // CHECK-LABEL: @test_vec_any_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vui128a, vui128b);
}
1790
// Verify vec_any_ne on vector bool __int128 lowers to vcmpequq.p with
// predicate 3.
int test_vec_any_ne_bool_int128(void) {
  // CHECK-LABEL: @test_vec_any_ne_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vbi128a, vbi128b);
}
1797
// Verify vec_any_lt on signed __int128 lowers to vcmpgtsq.p with predicate 1.
int test_vec_any_lt_s128(void) {
  // CHECK-LABEL: @test_vec_any_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vsi128a, vsi128b);
}
1804
// Verify vec_any_lt on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 1.
int test_vec_any_lt_u128(void) {
  // CHECK-LABEL: @test_vec_any_lt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vui128a, vui128b);
}
1811
// Verify vec_any_gt on signed __int128 lowers to vcmpgtsq.p with predicate 1.
int test_vec_any_gt_s128(void) {
  // CHECK-LABEL: @test_vec_any_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vsi128a, vsi128b);
}
1818
// Verify vec_any_gt on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 1.
int test_vec_any_gt_u128(void) {
  // CHECK-LABEL: @test_vec_any_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vui128a, vui128b);
}
1825
// Verify vec_any_le on signed __int128 lowers to vcmpgtsq.p with predicate 3.
int test_vec_any_le_s128(void) {
  // CHECK-LABEL: @test_vec_any_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vsi128a, vsi128b);
}
1832
// Verify vec_any_le on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 3.
int test_vec_any_le_u128(void) {
  // CHECK-LABEL: @test_vec_any_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vui128a, vui128b);
}
1839
// Verify vec_any_ge on signed __int128 lowers to vcmpgtsq.p with predicate 3.
int test_vec_any_ge_s128(void) {
  // CHECK-LABEL: @test_vec_any_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vsi128a, vsi128b);
}
1846
// Verify vec_any_ge on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 3.
int test_vec_any_ge_u128(void) {
  // CHECK-LABEL: @test_vec_any_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vui128a, vui128b);
}
1853
// Verify vec_all_eq on signed __int128 lowers to vcmpequq.p with predicate 2.
int test_vec_all_eq_s128(void) {
  // CHECK-LABEL: @test_vec_all_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vsi128a, vsi128b);
}
1860
// Verify vec_all_eq on unsigned __int128 lowers to vcmpequq.p with
// predicate 2.
int test_vec_all_eq_u128(void) {
  // CHECK-LABEL: @test_vec_all_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vui128a, vui128b);
}
1867
test_vec_all_eq_bool_int128(void)1868 int test_vec_all_eq_bool_int128(void) {
1869 // CHECK-LABEL: @test_vec_all_eq_bool_int128
1870 // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
1871 // CHECK-NEXT: ret i32
1872 return vec_all_eq(vbi128a, vbi128b);
1873 }
1874
// Verify vec_all_ne on signed __int128 lowers to vcmpequq.p with predicate 0.
int test_vec_all_ne_s128(void) {
  // CHECK-LABEL: @test_vec_all_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vsi128a, vsi128b);
}
1881
// Verify vec_all_ne on unsigned __int128 lowers to vcmpequq.p with
// predicate 0.
int test_vec_all_ne_u128(void) {
  // CHECK-LABEL: @test_vec_all_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vui128a, vui128b);
}
1888
test_vec_all_ne_bool_int128(void)1889 int test_vec_all_ne_bool_int128(void) {
1890 // CHECK-LABEL: test_vec_all_ne_bool_int128
1891 // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
1892 // CHECK-NEXT: ret i32
1893 return vec_all_ne(vbi128a, vbi128b);
1894 }
1895
// Verify vec_all_lt on signed __int128 lowers to vcmpgtsq.p with predicate 2.
int test_vec_all_lt_s128(void) {
  // CHECK-LABEL: @test_vec_all_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_lt(vsi128a, vsi128b);
}
1902
test_vec_all_lt_u128(void)1903 int test_vec_all_lt_u128(void) {
1904 // CHECK-LABEL: @test_vec_all_lt_u128(
1905 // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
1906 // CHECK: ret i32
1907 return vec_all_lt(vui128a, vui128b);
1908 }
1909
// Verify vec_all_gt on signed __int128 lowers to vcmpgtsq.p with predicate 2.
int test_vec_all_gt_s128(void) {
  // CHECK-LABEL: @test_vec_all_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vsi128a, vsi128b);
}
1916
// Verify vec_all_gt on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 2.
int test_vec_all_gt_u128(void) {
  // CHECK-LABEL: @test_vec_all_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vui128a, vui128b);
}
1923
// Verify vec_all_le on signed __int128 lowers to vcmpgtsq.p with predicate 0.
int test_vec_all_le_s128(void) {
  // CHECK-LABEL: @test_vec_all_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vsi128a, vsi128b);
}
1930
// Verify vec_all_le on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 0.
int test_vec_all_le_u128(void) {
  // CHECK-LABEL: @test_vec_all_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vui128a, vui128b);
}
1937
// Verify vec_all_ge on signed __int128 lowers to vcmpgtsq.p with predicate 0.
int test_vec_all_ge_s128(void) {
  // CHECK-LABEL: @test_vec_all_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vsi128a, vsi128b);
}
1944
// Verify vec_all_ge on unsigned __int128 lowers to vcmpgtuq.p with
// predicate 0.
int test_vec_all_ge_u128(void) {
  // CHECK-LABEL: @test_vec_all_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vui128a, vui128b);
}
1951
// vec_rl on signed __int128: the rotate is expanded inline; match the
// sub/lshr/or tail of that expansion rather than an intrinsic call.
vector signed __int128 test_vec_rl_s128(void) {
  // CHECK-LABEL: @test_vec_rl_s128(
  // CHECK: sub <1 x i128>
  // CHECK-NEXT: lshr <1 x i128>
  // CHECK-NEXT: or <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rl(vsi128a, vsi128b);
}
1960
test_vec_rl_u128(void)1961 vector unsigned __int128 test_vec_rl_u128(void) {
1962 // CHECK-LABEL: @test_vec_rl_u128(
1963 // CHECK: sub <1 x i128>
1964 // CHECK: lshr <1 x i128>
1965 // CHECK: or <1 x i128>
1966 // CHECK-NEXT: ret <1 x i128>
1967 return vec_rl(vui128a, vui128b);
1968 }
1969
// vec_rlnm on signed __int128: the shift/mask control bytes are gathered with
// an endian-dependent shufflevector before calling vrlqnm.
vector signed __int128 test_vec_rlnm_s128(void) {
  // CHECK-LABEL: @test_vec_rlnm_s128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vsi128a, vsi128b, vsi128c);
}
1978
// vec_rlnm on unsigned __int128: identical lowering to the signed case —
// endian-dependent shufflevector feeding vrlqnm.
vector unsigned __int128 test_vec_rlnm_u128(void) {
  // CHECK-LABEL: @test_vec_rlnm_u128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vui128a, vui128b, vui128c);
}
1987
// Verify vec_rlmi (rotate left then mask insert) on signed __int128 lowers to
// the vrlqmi intrinsic.
vector signed __int128 test_vec_rlmi_s128(void) {
  // CHECK-LABEL: @test_vec_rlmi_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vsi128a, vsi128b, vsi128c);
}
1994
// Verify vec_rlmi on unsigned __int128 also lowers to the vrlqmi intrinsic.
vector unsigned __int128 test_vec_rlmi_u128(void) {
  // CHECK-LABEL: @test_vec_rlmi_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vui128a, vui128b, vui128c);
}
2001