/* { dg-do compile { target { powerpc*-*-linux* && lp64 } } } */
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-skip-if "" { powerpc*-*-*spe* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_p8vector_ok } */
/* { dg-options "-mcpu=power8 -O2" } */
/* { dg-final { scan-assembler-times "bcdadd\[.\] " 4 } } */
/* { dg-final { scan-assembler-times "bcdsub\[.\] " 4 } } */
/* { dg-final { scan-assembler-not "bl __builtin" } } */
/* { dg-final { scan-assembler-not "mtvsr" } } */
/* { dg-final { scan-assembler-not "mfvsr" } } */
/* { dg-final { scan-assembler-not "lvx" } } */
/* { dg-final { scan-assembler-not "lxvw4x" } } */
/* { dg-final { scan-assembler-not "lxvd2x" } } */
/* { dg-final { scan-assembler-not "stvx" } } */
/* { dg-final { scan-assembler-not "stxvw4x" } } */
/* { dg-final { scan-assembler-not "stxvd2x" } } */

/* 128-bit vector and scalar types used by the BCD builtins below.  */
typedef __int128_t __attribute__((__vector_size__(16))) vector_128_t;
typedef __int128_t scalar_128_t;
typedef unsigned long long scalar_64_t;

/* Test whether the peephole works to allow folding a bcdadd with a
   bcdadd_<test> into a single instruction.  */

25 vector_128_t
do_add_lt(vector_128_t a,vector_128_t b,int * p)26 do_add_lt (vector_128_t a, vector_128_t b, int *p)
27 {
28 vector_128_t ret = __builtin_bcdadd (a, b, 0);
29 if (__builtin_bcdadd_lt (a, b, 0))
30 *p = 1;
31
32 return ret;
33 }
34
35 vector_128_t
do_add_eq(vector_128_t a,vector_128_t b,int * p)36 do_add_eq (vector_128_t a, vector_128_t b, int *p)
37 {
38 vector_128_t ret = __builtin_bcdadd (a, b, 0);
39 if (__builtin_bcdadd_eq (a, b, 0))
40 *p = 1;
41
42 return ret;
43 }
44
45 vector_128_t
do_add_gt(vector_128_t a,vector_128_t b,int * p)46 do_add_gt (vector_128_t a, vector_128_t b, int *p)
47 {
48 vector_128_t ret = __builtin_bcdadd (a, b, 0);
49 if (__builtin_bcdadd_gt (a, b, 0))
50 *p = 1;
51
52 return ret;
53 }
54
55 vector_128_t
do_add_ov(vector_128_t a,vector_128_t b,int * p)56 do_add_ov (vector_128_t a, vector_128_t b, int *p)
57 {
58 vector_128_t ret = __builtin_bcdadd (a, b, 0);
59 if (__builtin_bcdadd_ov (a, b, 0))
60 *p = 1;
61
62 return ret;
63 }
64
65 vector_128_t
do_sub_lt(vector_128_t a,vector_128_t b,int * p)66 do_sub_lt (vector_128_t a, vector_128_t b, int *p)
67 {
68 vector_128_t ret = __builtin_bcdsub (a, b, 0);
69 if (__builtin_bcdsub_lt (a, b, 0))
70 *p = 1;
71
72 return ret;
73 }
74
75 vector_128_t
do_sub_eq(vector_128_t a,vector_128_t b,int * p)76 do_sub_eq (vector_128_t a, vector_128_t b, int *p)
77 {
78 vector_128_t ret = __builtin_bcdsub (a, b, 0);
79 if (__builtin_bcdsub_eq (a, b, 0))
80 *p = 1;
81
82 return ret;
83 }
84
85 vector_128_t
do_sub_gt(vector_128_t a,vector_128_t b,int * p)86 do_sub_gt (vector_128_t a, vector_128_t b, int *p)
87 {
88 vector_128_t ret = __builtin_bcdsub (a, b, 0);
89 if (__builtin_bcdsub_gt (a, b, 0))
90 *p = 1;
91
92 return ret;
93 }
94
95 vector_128_t
do_sub_ov(vector_128_t a,vector_128_t b,int * p)96 do_sub_ov (vector_128_t a, vector_128_t b, int *p)
97 {
98 vector_128_t ret = __builtin_bcdsub (a, b, 0);
99 if (__builtin_bcdsub_ov (a, b, 0))
100 *p = 1;
101
102 return ret;
103 }
104