/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2010-2016. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: Optimized fallbacks used when native ops are missing
 * Author: Rickard Green
 */
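
/*
 * When real native spinlocks/rwspinlocks exist, only trivial destroy
 * operations are supplied here. Otherwise, spinlocks are implemented on
 * top of pthread spinlocks when available, or else on top of 32-bit
 * native atomic operations; rwspinlocks are implemented on top of 32-bit
 * native atomic operations.
 */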

#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
#define ETHR_OPTIMIZED_FALLBACKS_H__

#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

static ETHR_INLINE int
ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
{
    return 0;
}

#endif

#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
/* --- Native spinlocks using pthread spinlocks -------------------------- */
#define ETHR_HAVE_NATIVE_SPINLOCKS 1

#define ETHR_NATIVE_SPINLOCK_IMPL "pthread"

typedef pthread_spinlock_t ethr_native_spinlock_t;

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
    int err = pthread_spin_init((pthread_spinlock_t *) lock, 0);
    if (err)
        ETHR_FATAL_ERROR__(err);
}

static ETHR_INLINE int
ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
{
    return pthread_spin_destroy((pthread_spinlock_t *) lock);
}

static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
    int err = pthread_spin_unlock((pthread_spinlock_t *) lock);
    if (err)
        ETHR_FATAL_ERROR__(err);
}

static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
    int err = pthread_spin_lock((pthread_spinlock_t *) lock);
    if (err)
        ETHR_FATAL_ERROR__(err);
}

#endif

#elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
/* --- Native spinlocks using native atomics -------------------------------- */
#define ETHR_HAVE_NATIVE_SPINLOCKS 1

#define ETHR_NATIVE_SPINLOCK_IMPL "native-atomics"

typedef ethr_atomic32_t ethr_native_spinlock_t;
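
/*
 * Lock-word protocol: 0 means unlocked, 1 means locked. The lock is taken
 * with a compare-and-swap with acquire barrier and released with a set
 * with release barrier (see the functions below).
 */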

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

#undef ETHR_NSPN_AOP__
#define ETHR_NSPN_AOP__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)

static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
    ETHR_NSPN_AOP__(init)(lock, 0);
}

static ETHR_INLINE int
ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
{
    return ETHR_NSPN_AOP__(read)(lock) == 0 ? 0 : EBUSY;
}

static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
    ETHR_ASSERT(ETHR_NSPN_AOP__(read)(lock) == 1);
    ETHR_NSPN_AOP__(set_relb)(lock, 0);
}

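/*
 * Test-and-test-and-set acquire: attempt the cmpxchg, and on failure spin
 * on plain reads until the lock looks free before retrying, which avoids
 * hammering the cache line with atomic read-modify-write operations.
 */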
static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
    while (ETHR_NSPN_AOP__(cmpxchg_acqb)(lock, 1, 0) != 0) {
        while (ETHR_NSPN_AOP__(read)(lock) != 0)
            ETHR_SPIN_BODY;
    }
    ETHR_COMPILER_BARRIER;
}

#undef ETHR_NSPN_AOP__

#endif

#endif
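
/*
 * Illustrative usage of the native spinlock operations defined above
 * (a sketch; the higher-level ethread lock types are the intended
 * consumers of these operations):
 *
 *     ethr_native_spinlock_t lock;
 *     ethr_native_spinlock_init(&lock);
 *     ethr_native_spin_lock(&lock);
 *     ... critical section ...
 *     ethr_native_spin_unlock(&lock);
 *     if (ethr_native_spinlock_destroy(&lock) != 0)
 *         ... lock was still busy (e.g. EBUSY) ...
 */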


#if defined(ETHR_HAVE_NATIVE_RWSPINLOCKS)

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

static ETHR_INLINE int
ethr_native_rwlock_destroy(ethr_native_rwlock_t *lock)
{
    return 0;
}

#endif

#elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
/* --- Native rwspinlocks using native atomics ------------------------------ */
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#define ETHR_NATIVE_RWSPINLOCK_IMPL "native-atomics"

typedef ethr_atomic32_t ethr_native_rwlock_t;
#define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
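
/*
 * Lock-word encoding: the low bits count the readers currently holding
 * the lock, and ETHR_WLOCK_FLAG__ (bit 30) is set while a writer holds,
 * or is waiting to acquire, the lock.
 */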

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

#undef ETHR_NRWSPN_AOP__
#define ETHR_NRWSPN_AOP__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)

static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
    ETHR_NRWSPN_AOP__(init)(lock, 0);
}

static ETHR_INLINE int
ethr_native_rwlock_destroy(ethr_native_rwlock_t *lock)
{
    return ETHR_NRWSPN_AOP__(read)(lock) == 0 ? 0 : EBUSY;
}

static ETHR_INLINE void
ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
    ETHR_ASSERT(ETHR_NRWSPN_AOP__(read)(lock) >= 0);
    ETHR_NRWSPN_AOP__(dec_relb)(lock);
}

static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
    ethr_sint32_t act, exp = 0;
    while (1) {
        act = ETHR_NRWSPN_AOP__(cmpxchg_acqb)(lock, exp+1, exp);
        if (act == exp)
            break;
        /* Wait for writer to leave */
        while (act & ETHR_WLOCK_FLAG__) {
            ETHR_SPIN_BODY;
            act = ETHR_NRWSPN_AOP__(read)(lock);
        }
        exp = act;
    }
    ETHR_COMPILER_BARRIER;
}

static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
    ETHR_ASSERT(ETHR_NRWSPN_AOP__(read)(lock) == ETHR_WLOCK_FLAG__);
    ETHR_NRWSPN_AOP__(set_relb)(lock, 0);
}

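/*
 * Write locking is done in two phases: first claim ETHR_WLOCK_FLAG__ via
 * cmpxchg (spinning while another writer holds the flag), then spin until
 * all readers have left and only the writer flag remains in the lock word.
 */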
static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
    ethr_sint32_t act, exp = 0;
    while (1) {
        act = ETHR_NRWSPN_AOP__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
        if (act == exp)
            break;
        /* Wait for writer to leave */
        while (act & ETHR_WLOCK_FLAG__) {
            ETHR_SPIN_BODY;
            act = ETHR_NRWSPN_AOP__(read)(lock);
        }
        exp = act;
    }
    act |= ETHR_WLOCK_FLAG__;
    /* Wait for readers to leave */
    while (act != ETHR_WLOCK_FLAG__) {
        ETHR_SPIN_BODY;
        act = ETHR_NRWSPN_AOP__(read_acqb)(lock);
    }
    ETHR_COMPILER_BARRIER;
}
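
/*
 * Illustrative usage (a sketch; the higher-level ethread rwlock types are
 * the intended consumers of these operations):
 *
 *     ethr_native_rwlock_t rwlock;
 *     ethr_native_rwlock_init(&rwlock);
 *
 *     ethr_native_read_lock(&rwlock);
 *     ... shared (read-only) critical section ...
 *     ethr_native_read_unlock(&rwlock);
 *
 *     ethr_native_write_lock(&rwlock);
 *     ... exclusive critical section ...
 *     ethr_native_write_unlock(&rwlock);
 */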

#undef ETHR_NRWSPN_AOP__

#endif

#endif

#endif