1 /**
2 * @file spinlock.h
3 *
4 * This file attempts to implement spin locks for various platforms and/or CPU
5 * instruction sets.
6 */
7 #ifndef SPINLOCK_H
8 #define SPINLOCK_H
9
10 #define DEBUG_SPINLOCK 0
11
12 #define OPENPA 0
13
14 #if OPENPA
15 # if DEBUG_SPINLOCK
16 # warning SPINLOCK: openpa
17 # endif
18 # define SPINLOCK
19 # include "opa_primitives.h"
20 # define LOCK_T OPA_int_t
21 # define TESTANDSET(x) OPA_swap_int((x), 1)
22 # define MEMORY_BARRIER OPA_read_write_barrier
23
24 #elif (defined(PPC) || defined(__PPC__) || defined(__PPC))
25 # if DEBUG_SPINLOCK
26 # warning SPINLOCK: PPC
27 # endif
28 # define SPINLOCK
29 # include "asm-ppc.h"
30 //# define TESTANDSET testandset
31 //# define TESTANDSET acquireLock
32 # define armci_acquire_spinlock acquire_spinlock
33 # define armci_release_spinlock release_spinlock
34 # define MEMORY_BARRIER memory_barrier
/* Atomically swap 1 into the lock word and return its previous value:
 * 0 means the lock was free (and is now ours), nonzero means it was
 * already held.  Assumes atomic_exchange() (from asm-ppc.h) swaps the
 * two int-sized operands atomically — TODO confirm against asm-ppc.h. */
static int testandset(void *spinlock) {
    int old = 1;
    atomic_exchange(&old, spinlock, sizeof(int));
    return old;
}
/* Full memory fence on PowerPC: "sync" orders all prior loads/stores
 * ahead of any that follow.  Declared (void) rather than the obsolescent
 * empty parameter list, which declares an unspecified-argument function
 * in pre-C23 C. */
static void memory_barrier(void) {
    __asm__ __volatile__ ("sync" : : : "memory");
}
43
44 #elif defined(__i386__) || defined(__x86_64__)
45 # if DEBUG_SPINLOCK
46 # warning SPINLOCK: x86_64
47 # endif
48 # define SPINLOCK
49 # include "atomics-i386.h"
/* Try to take the lock: atomically exchange 1 with the lock word and
 * report the old contents (0 == acquired, nonzero == was already held).
 * atomic_exchange() comes from atomics-i386.h. */
static int testandset(void *spinlock) {
    int prev = 1;
    atomic_exchange(&prev, spinlock, sizeof(int));
    return prev;
}
55 # define TESTANDSET testandset
56
57 #elif defined(HPUX) && defined(__ia64) /* HPUX on IA64, non gcc */
58 # if DEBUG_SPINLOCK
59 # warning SPINLOCK: HPUX ia64
60 # endif
61 # define SPINLOCK
62 typedef unsigned int slock_t;
63 # include <ia64/sys/inline.h>
64 # define TESTANDSET(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
65 # define RELEASE_SPINLOCK(lock) (*((volatile LOCK_T *) (lock)) = 0)
66
67 #elif defined(__ia64)
68 # if DEBUG_SPINLOCK
69 # warning SPINLOCK: ia64
70 # endif
71 # define SPINLOCK
72 # include "atomic_ops_ia64.h"
/* IA64 test-and-set: swap 1 into the lock word via atomic_swap_int()
 * (from atomic_ops_ia64.h) and return the word's previous value. */
static int testandset(void *spinlock) {
    int result;
    atomic_swap_int(spinlock, 1, &result);
    return result;
}
79 # define TESTANDSET testandset
80
81 #elif defined(DECOSF)
82 # if DEBUG_SPINLOCK
83 # warning SPINLOCK: DECOSF
84 # endif
85 # error "no implementation"
86
87 #elif defined(SGI)
88 # if DEBUG_SPINLOCK
89 # warning SPINLOCK: SGI
90 # endif
91 # include <mutex.h>
92 # define SPINLOCK
93 # define TESTANDSET(x) __lock_test_and_set((x), 1)
94 # define RELEASE_SPINLOCK __lock_release
95
96 /*#elif defined(AIX)*/
97 #elif HAVE_SYS_ATOMIC_OP_H
98 # if DEBUG_SPINLOCK
99 # warning SPINLOCK: sys/atomic_op.h (AIX)
100 # endif
101 # include <sys/atomic_op.h>
102 # define SPINLOCK
103 # define TESTANDSET(x) (_check_lock((x), 0, 1)==TRUE)
104 # define RELEASE_SPINLOCK(x) _clear_lock((x),0)
105
106 #elif defined(SOLARIS)
107 # if DEBUG_SPINLOCK
108 # warning SPINLOCK: SOLARIS
109 # endif
110 # include <sys/atomic.h>
111 # include <sys/machlock.h>
112 # define SPINLOCK
113 # define TESTANDSET(x) (!_lock_try((x)))
114 # define RELEASE_SPINLOCK _lock_clear
115
116 #elif defined(MACX)
117
118 #elif defined(HPUX__)
119 # if DEBUG_SPINLOCK
120 # warning SPINLOCK: HPUX__
121 # endif
122 extern int _acquire_lock();
123 extern void _release_lock();
124 # define SPINLOCK
125 # define TESTANDSET(x) (!_acquire_lock((x)))
126 # define RELEASE_SPINLOCK _release_lock
127
128 #elif defined(NEC)
129 # if DEBUG_SPINLOCK
130 # warning SPINLOCK: NEC
131 # endif
132 extern ullong ts1am_2me();
133 # define LOCK_T ullong
134 # define _LKWD (1ULL << 63)
135 # define SPINLOCK
136 # define TESTANDSET(x) ((_LKWD & ts1am_2me(_LKWD, 0xffULL, (ullong)(x))))
137 # define MEMORY_BARRIER mpisx_clear_cache
138 extern void mpisx_clear_cache();
139 # define RELEASE_SPINLOCK(x) ts1am_2me(0ULL, 0xffULL, (ullong)x);
140
141 #endif
142
143 #ifdef SPINLOCK
144
145 #if DEBUG_
146 # if HAVE_STDIO_H
147 # include <stdio.h>
148 # endif
149 #endif
150
151 #if HAVE_UNISTD_H
152 # include <unistd.h>
153 #endif
154
/* Pad each lock out to DBL_PAD doubles (128 bytes by default) so that
 * two locks never land in the same cache line; concurrent spinners on
 * adjacent locks would otherwise false-share. */
#ifndef DBL_PAD
#   define DBL_PAD 16
#endif

typedef struct {
    double lock[DBL_PAD];
} pad_lock_t;
163
164 #ifndef LOCK_T
165 # define LOCK_T int
166 #endif
167 #define PAD_LOCK_T pad_lock_t
168
/* Put *mutex into the unlocked state (0).  Must be called before the
 * first armci_acquire_spinlock() on this lock. */
static inline void armci_init_spinlock(LOCK_T *mutex)
{
#if OPENPA
    OPA_store_int(mutex, 0);
#else
    *mutex = 0;
#endif
}
177
178 #ifdef TESTANDSET
179
/* Busy-wait until the lock is obtained.  After every 10 consecutive
 * failed attempts the caller yields with usleep(1) to back off and
 * reduce memory-bus contention.  No-op under BGML/DCMF, where the
 * messaging layer provides its own synchronization. */
static inline void armci_acquire_spinlock(LOCK_T *mutex)
{
#if defined(BGML) || defined(DCMF)
    return;
#else
    int spins = 0;

    while (TESTANDSET(mutex)) {
        if (++spins >= 10) {
#   if DEBUG_
            extern int armci_me;
            printf("%d:spinlock sleeping\n", armci_me); fflush(stdout);
#   endif
            usleep(1);
            spins = 0;
        }
    }
#endif
}
200
201 #ifdef RELEASE_SPINLOCK
202 # ifdef MEMORY_BARRIER
203 # define armci_release_spinlock(x) MEMORY_BARRIER(); RELEASE_SPINLOCK(x)
204 # else
205 # define armci_release_spinlock(x) RELEASE_SPINLOCK(x)
206 # endif
207 #else
armci_release_spinlock(LOCK_T * mutex)208 static inline void armci_release_spinlock(LOCK_T *mutex)
209 {
210 #if defined(BGML) || defined(DCMF)
211 return;
212 #else
213 # ifdef MEMORY_BARRIER
214 MEMORY_BARRIER ();
215 # endif
216 #if OPENPA
217 OPA_store_int(mutex, 0);
218 #else
219 *mutex =0;
220 #endif
221 # ifdef MEMORY_BARRIER
222 MEMORY_BARRIER ();
223 # endif
224 # if (defined(MACX)||defined(LINUX)) && defined(__GNUC__) && defined(__ppc__)
225 __asm__ __volatile__ ("isync" : : : "memory");
226 # endif
227 #endif
228 }
229 #endif /* RELEASE_SPINLOCK */
230
231 #endif /* TESTANDSET */
232
233 #endif /* SPINLOCK */
234
235 #endif /* SPINLOCK_H */
236