/* Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

#include "thr_template.c"

volatile uint32 b32;
volatile int32  c32;

/* add and subtract the same random value in a loop; "bad" must be 0 at the end */
pthread_handler_t test_atomic_add(void *arg)
{
  int m= (*(int *)arg)/2;
  int32 x;
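  /* seed x from the stack address of m, so each thread starts its
     pseudo-random sequence from a different value */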
  for (x= ((int)(intptr)(&m)); m ; m--)
  {
    x= (x*m+0x87654321) & INT_MAX32;
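    /* the add and the matching subtract must cancel exactly; any residue
       left in "bad" means an atomic update was lost */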
    my_atomic_add32(&bad, x);
    my_atomic_add32(&bad, -x);
  }
  return 0;
}

volatile int64 a64;
/* add and subtract the same random value in a loop; a64 must be 0 at the end */
pthread_handler_t test_atomic_add64(void *arg)
{
  int m= (*(int *)arg)/2;
  int64 x;
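  /* same scheme as test_atomic_add, but exercising the 64-bit primitives
     and accumulating into a64 instead of bad */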
  for (x= ((int64)(intptr)(&m)); m ; m--)
  {
    x= (x*m+0xfdecba987654321LL) & INT_MAX64;
    my_atomic_add64(&a64, x);
    my_atomic_add64(&a64, -x);
  }
  return 0;
}


/*
  1. generate thread number 0..N-1 from b32
  2. add it to bad
  3. swap thread numbers in c32
  4. (optionally) one more swap to avoid 0 as a result
  5. subtract result from bad
  must get 0 in bad at the end
*/
pthread_handler_t test_atomic_fas(void *arg)
{
  int    m= *(int *)arg;
  int32  x;

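  /* step 1: generate our thread number 0..N-1 from b32
     (my_atomic_add32 returns the value b32 had before the addition) */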
  x= my_atomic_add32(&b32, 1);

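  /* step 2: add our thread number to bad */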
  my_atomic_add32(&bad, x);

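  /* step 3: swap thread numbers through c32 */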
  for (; m ; m--)
    x= my_atomic_fas32(&c32, x);

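  /* step 4: one more swap, so we do not end up subtracting 0 */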
  if (!x)
    x= my_atomic_fas32(&c32, x);

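  /* step 5: subtract the number we ended up with; across all threads
     the subtractions must cancel the additions, leaving bad at 0 */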
  my_atomic_add32(&bad, -x);

  return 0;
}

/*
  Same as test_atomic_add, but my_atomic_add32 is emulated with
  my_atomic_cas32. Note that the slowdown is proportional to the
  number of CPUs.
*/
pthread_handler_t test_atomic_cas(void *arg)
{
  int m= (*(int *)arg)/2, ok= 0;
  int32 x, y;
  for (x= ((int)(intptr)(&m)); m ; m--)
  {
    y= my_atomic_load32(&bad);
    x= (x*m+0x87654321) & INT_MAX32;
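    /* on failure, my_atomic_cas32 stores the value it actually found in y,
       so each retry starts from a fresh expected value */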
    do {
      ok= my_atomic_cas32((int32*) &bad, &y, y+x);
    } while (!ok);
    do {
      ok= my_atomic_cas32((int32*) &bad, &y, y-x);
    } while (!ok);
  }
  return 0;
}


void do_tests()
{
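  /* five ok() reports are expected: presumably one per test_concurrently
     call (see thr_template.c) plus the explicit add64 check below */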
  plan(5);

  b32= c32= 0;
  test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_fas32", test_atomic_fas, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES);

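  /* single-threaded sanity check of the 64-bit add before running it
     concurrently */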
  {
    /*
      If b is not volatile, the wrong assembly code is generated on OSX Lion
      as the variable is optimized away as a constant.
      See Bug#62533 / Bug#13030056.
      Another workaround is to specify the architecture explicitly, e.g. with
      CFLAGS/CXXFLAGS= "-m64".
    */
    volatile int64 b= 0x1000200030004000LL;
    a64= 0;
    my_atomic_add64(&a64, b);
    ok(a64==b, "add64");
  }
  a64= 0;
  test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES);
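  /* fold the 64-bit result into "bad" so the shared harness in
     thr_template.c reports a failure if the adds did not cancel */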
  bad= (a64 != 0);
}