1 /**
2 * @file spinlock.h
3 *
4 * This file attempts to implement spin locks for various platforms and/or CPU
5 * instruction sets.
6 */
7 #ifndef SPINLOCK_H
8 #define SPINLOCK_H
9
10 #define DEBUG_SPINLOCK 0
11
12 #define OPENPA 0
13
14 #if OPENPA
15 # if DEBUG_SPINLOCK
16 # warning SPINLOCK: openpa
17 # endif
18 # define SPINLOCK
19 # include "opa_primitives.h"
20 # define LOCK_T OPA_int_t
21 # define TESTANDSET(x) OPA_swap_int((x), 1)
22 # define MEMORY_BARRIER OPA_read_write_barrier
23
24 #elif (defined(PPC) || defined(__PPC__) || defined(__PPC))
25 # if DEBUG_SPINLOCK
26 # warning SPINLOCK: PPC
27 # endif
28 # define SPINLOCK
29 # include "asm-ppc.h"
30 //# define TESTANDSET testandset
31 //# define TESTANDSET acquireLock
32 # define armci_acquire_spinlock acquire_spinlock
33 # define armci_release_spinlock release_spinlock
34 # define MEMORY_BARRIER memory_barrier
/* Atomically store 1 into the lock word and return the value it held
 * before the swap; 0 means the lock was free and the caller now owns it.
 * (Relies on the project helper atomic_exchange() swapping *spinlock
 * with the int pointed to by its first argument.) */
static int testandset(void *spinlock) {
    int previous = 1;
    atomic_exchange(&previous, spinlock, sizeof(int));
    return previous;
}
/* Full memory barrier for PowerPC: the "sync" instruction orders all
 * preceding loads/stores before any subsequent ones; the "memory"
 * clobber stops the compiler from reordering accesses across the call.
 * Fix: declare as (void) — an empty () parameter list is an old-style
 * unprototyped declaration (invalid in C23). */
static void memory_barrier(void) {
    __asm__ __volatile__ ("sync" : : : "memory");
}
43
44 #elif defined(__i386__) || defined(__x86_64__)
45 # if DEBUG_SPINLOCK
46 # warning SPINLOCK: x86_64
47 # endif
48 # define SPINLOCK
49 # include "atomics-i386.h"
/* Test-and-set via the x86 atomic exchange helper: writes 1 into the
 * lock word and returns its prior value (0 == lock acquired). */
static int testandset(void *spinlock) {
    int previous = 1;
    atomic_exchange(&previous, spinlock, sizeof(int));
    return previous;
}
55 # define TESTANDSET testandset
56
57 #elif defined(__ia64)
58 # if DEBUG_SPINLOCK
59 # warning SPINLOCK: ia64
60 # endif
61 # define SPINLOCK
62 # include "atomic_ops_ia64.h"
/* Test-and-set via the ia64 atomic swap helper: stores 1 into the lock
 * word and reports the old value through the third argument
 * (0 == lock acquired). */
static int testandset(void *spinlock) {
    int new_value = 1;
    int old_value;
    atomic_swap_int(spinlock, new_value, &old_value);
    return old_value;
}
69 # define TESTANDSET testandset
70
71 #elif defined(DECOSF)
72 # if DEBUG_SPINLOCK
73 # warning SPINLOCK: DECOSF
74 # endif
75 # error "no implementation"
76
77 #elif defined(SGI)
78 # if DEBUG_SPINLOCK
79 # warning SPINLOCK: SGI
80 # endif
81 # include <mutex.h>
82 # define SPINLOCK
83 # define TESTANDSET(x) __lock_test_and_set((x), 1)
84 # define RELEASE_SPINLOCK __lock_release
85
86 /*#elif defined(AIX)*/
87 #elif HAVE_SYS_ATOMIC_OP_H
88 # if DEBUG_SPINLOCK
89 # warning SPINLOCK: sys/atomic_op.h (AIX)
90 # endif
91 # include <sys/atomic_op.h>
92 # define SPINLOCK
93 # define TESTANDSET(x) (_check_lock((x), 0, 1)==TRUE)
94 # define RELEASE_SPINLOCK(x) _clear_lock((x),0)
95
96 #elif defined(SOLARIS)
97 # if DEBUG_SPINLOCK
98 # warning SPINLOCK: SOLARIS
99 # endif
100 # include <sys/atomic.h>
101 # include <sys/machlock.h>
102 # define SPINLOCK
103 # define TESTANDSET(x) (!_lock_try((x)))
104 # define RELEASE_SPINLOCK _lock_clear
105
106 #elif defined(MACX)
107
108 #elif defined(HPUX__)
109 # if DEBUG_SPINLOCK
110 # warning SPINLOCK: HPUX__
111 # endif
112 extern int _acquire_lock();
113 extern void _release_lock();
114 # define SPINLOCK
115 # define TESTANDSET(x) (!_acquire_lock((x)))
116 # define RELEASE_SPINLOCK _release_lock
117
118 #elif defined(HPUX) && defined(__ia64) /* HPUX on IA64, non gcc */
119 # if DEBUG_SPINLOCK
120 # warning SPINLOCK: HPUX ia64
121 # endif
122 # define SPINLOCK
123 typedef unsigned int slock_t;
124 # include <ia64/sys/inline.h>
125 # define TESTANDSET(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
126 # define RELEASE_SPINLOCK(lock) (*((volatile LOCK_T *) (lock)) = 0)
127
128 #elif defined(NEC)
129 # if DEBUG_SPINLOCK
130 # warning SPINLOCK: NEC
131 # endif
132 extern ullong ts1am_2me();
133 # define LOCK_T ullong
134 # define _LKWD (1ULL << 63)
135 # define SPINLOCK
136 # define TESTANDSET(x) ((_LKWD & ts1am_2me(_LKWD, 0xffULL, (ullong)(x))))
137 # define MEMORY_BARRIER mpisx_clear_cache
138 extern void mpisx_clear_cache();
139 # define RELEASE_SPINLOCK(x) ts1am_2me(0ULL, 0xffULL, (ullong)x);
140
141 #endif
142
143 #ifdef SPINLOCK
144
145 #if DEBUG_
146 # if HAVE_STDIO_H
147 # include <stdio.h>
148 # endif
149 #endif
150
151 #if HAVE_UNISTD_H
152 # include <unistd.h>
153 #endif
154
155 #ifndef DBL_PAD
156 # define DBL_PAD 16
157 #endif
158
159 /* make sure that locks are not sharing the same cache line */
160 typedef struct{
161 double lock[DBL_PAD];
162 }pad_lock_t;
163
164 #ifndef LOCK_T
165 # define LOCK_T int
166 #endif
167 #define PAD_LOCK_T pad_lock_t
168
/* Put a spin lock into its initial, unlocked (zero) state. */
static inline void armci_init_spinlock(LOCK_T *mutex)
{
#if OPENPA
    OPA_store_int(mutex, 0);
#else
    *mutex = 0;
#endif
}
177
178 #ifdef TESTANDSET
179
/* Spin until the test-and-set succeeds. After every 10 consecutive
 * failed attempts, yield the CPU for 1 microsecond and restart the
 * count, so a long-held lock does not burn a full core. */
static inline void armci_acquire_spinlock(LOCK_T *mutex)
{
#if defined(BGML) || defined(DCMF)
    return; /* messaging layer provides its own synchronization */
#else
    int spins = 0;
    const int spin_limit = 10;

    while (TESTANDSET(mutex)) {
        if (++spins == spin_limit) {
#   if DEBUG_
            extern int armci_me;
            printf("%d:spinlock sleeping\n", armci_me);
            fflush(stdout);
#   endif
            usleep(1);
            spins = 0;
        }
    }
#endif
}
200
201 #ifdef RELEASE_SPINLOCK
202 # ifdef MEMORY_BARRIER
203 # define armci_release_spinlock(x) MEMORY_BARRIER(); RELEASE_SPINLOCK(x)
204 # else
205 # define armci_release_spinlock(x) RELEASE_SPINLOCK(x)
206 # endif
207 #else
armci_release_spinlock(LOCK_T * mutex)208 static inline void armci_release_spinlock(LOCK_T *mutex)
209 {
210 #if defined(BGML) || defined(DCMF)
211 return;
212 #else
213 # ifdef MEMORY_BARRIER
214 MEMORY_BARRIER ();
215 # endif
216 #if OPENPA
217 OPA_store_int(mutex, 0);
218 #else
219 *mutex =0;
220 #endif
221 # ifdef MEMORY_BARRIER
222 MEMORY_BARRIER ();
223 # endif
224 # if (defined(MACX)||defined(LINUX)) && defined(__GNUC__) && defined(__ppc__)
225 __asm__ __volatile__ ("isync" : : : "memory");
226 # endif
227 #endif
228 }
229 #endif /* RELEASE_SPINLOCK */
230
231 #endif /* TESTANDSET */
232
233 #endif /* SPINLOCK */
234
235 #endif /* SPINLOCK_H */
236