/**
 * \file
 * Workarounds for atomic operations for platforms that don't have
 * truly atomic asm functions in atomic.h
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/utils/atomic.h>
#include <mono/utils/mono-compiler.h>

#if defined (WAPI_NO_ATOMIC_ASM) || defined (BROKEN_64BIT_ATOMICS_INTRINSIC)

#include <pthread.h>

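/* All of the fallback operations below serialize on this single
 * process-wide mutex.  That is only correct as long as every access to
 * the affected memory goes through these wrappers; a raw load or store
 * that bypasses the lock is not ordered with respect to them.  The
 * pthread_cleanup_push/pop pairs ensure the mutex is released even if
 * the thread is cancelled while holding it.
 */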
static pthread_mutex_t spin G_GNUC_UNUSED = PTHREAD_MUTEX_INITIALIZER;

#define NEED_64BIT_CMPXCHG_FALLBACK

#endif

#ifdef WAPI_NO_ATOMIC_ASM

gint32 mono_atomic_cas_i32(volatile gint32 *dest, gint32 exch, gint32 comp)
{
	gint32 old;
	int ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	ret = pthread_mutex_lock(&spin);
	g_assert (ret == 0);

	old = *dest;
	if (old == comp) {
		*dest = exch;
	}

	ret = pthread_mutex_unlock(&spin);
	g_assert (ret == 0);

	pthread_cleanup_pop (0);

	return old;
}
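
/* Illustrative caller pattern (not part of this file's API): CAS
 * primitives like the one above are typically wrapped in a retry loop,
 * e.g. to implement an atomic maximum:
 *
 *	gint32 old;
 *	do {
 *		old = mono_atomic_load_i32 (dest);
 *		if (val <= old)
 *			break;
 *	} while (mono_atomic_cas_i32 (dest, val, old) != old);
 */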

gpointer mono_atomic_cas_ptr(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;
	int ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	ret = pthread_mutex_lock(&spin);
	g_assert (ret == 0);

	old = *dest;
	if (old == comp) {
		*dest = exch;
	}

	ret = pthread_mutex_unlock(&spin);
	g_assert (ret == 0);

	pthread_cleanup_pop (0);

	return old;
}

gint32 mono_atomic_add_i32(volatile gint32 *dest, gint32 add)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dest += add;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_add_i64(volatile gint64 *dest, gint64 add)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dest += add;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint32 mono_atomic_inc_i32(volatile gint32 *dest)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	(*dest)++;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_inc_i64(volatile gint64 *dest)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	(*dest)++;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint32 mono_atomic_dec_i32(volatile gint32 *dest)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	(*dest)--;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_dec_i64(volatile gint64 *dest)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	(*dest)--;
	ret = *dest;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint32 mono_atomic_xchg_i32(volatile gint32 *dest, gint32 exch)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *dest;
	*dest = exch;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_xchg_i64(volatile gint64 *dest, gint64 exch)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *dest;
	*dest = exch;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gpointer mono_atomic_xchg_ptr(volatile gpointer *dest, gpointer exch)
{
	gpointer ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *dest;
	*dest = exch;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

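/* Note the difference in return values: mono_atomic_add_i32/i64 above
 * return the *new* value after the addition, while the fetch_add
 * variants below return the value the destination held *before* the
 * addition, matching the usual fetch-and-add semantics.
 */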
gint32 mono_atomic_fetch_add_i32(volatile gint32 *dest, gint32 add)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *dest;
	*dest += add;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_fetch_add_i64(volatile gint64 *dest, gint64 add)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *dest;
	*dest += add;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

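/* Plain aligned loads and stores are naturally atomic on many targets,
 * but the fallbacks below still take the lock: that is what keeps them
 * ordered with respect to the read-modify-write operations above.
 */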
gint8 mono_atomic_load_i8(volatile gint8 *src)
{
	gint8 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *src;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint16 mono_atomic_load_i16(volatile gint16 *src)
{
	gint16 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *src;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint32 mono_atomic_load_i32(volatile gint32 *src)
{
	gint32 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *src;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gint64 mono_atomic_load_i64(volatile gint64 *src)
{
	gint64 ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *src;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

gpointer mono_atomic_load_ptr(volatile gpointer *src)
{
	gpointer ret;
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	ret = *src;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);

	return ret;
}

void mono_atomic_store_i8(volatile gint8 *dst, gint8 val)
{
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dst = val;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);
}

void mono_atomic_store_i16(volatile gint16 *dst, gint16 val)
{
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dst = val;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);
}

void mono_atomic_store_i32(volatile gint32 *dst, gint32 val)
{
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dst = val;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);
}

void mono_atomic_store_i64(volatile gint64 *dst, gint64 val)
{
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dst = val;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);
}

void mono_atomic_store_ptr(volatile gpointer *dst, gpointer val)
{
	int thr_ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	thr_ret = pthread_mutex_lock(&spin);
	g_assert (thr_ret == 0);

	*dst = val;

	thr_ret = pthread_mutex_unlock(&spin);
	g_assert (thr_ret == 0);

	pthread_cleanup_pop (0);
}

#endif

#if defined (NEED_64BIT_CMPXCHG_FALLBACK)

#if defined (TARGET_OSX)

/* The compiler breaks if this code is in the header... */

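/* __sync_val_compare_and_swap is the legacy GCC/Clang builtin: it
 * stores exch into *dest if *dest equals comp, returns the value *dest
 * held before the call, and acts as a full memory barrier.
 */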
gint64
mono_atomic_cas_i64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

#elif defined (__arm__) && defined (HAVE_ARMV7) && (defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_ANDROID))

#if defined (TARGET_IOS) || defined (TARGET_WATCHOS)

#ifndef __clang__
#error "Not supported."
#endif

gint64
mono_atomic_cas_i64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

#elif defined (TARGET_ANDROID)

/* Some Android systems can't find the 64-bit CAS intrinsic at runtime,
 * so we have to roll our own...
 */

gint64 mono_atomic_cas_i64(volatile gint64 *dest, gint64 exch, gint64 comp) __attribute__ ((__naked__));

gint64
mono_atomic_cas_i64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	/* AAPCS: dest arrives in r0, exch in the register pair r2:r3, and
	 * comp spills to the stack because r0-r3 are already in use.
	 */
	__asm__ (
		"push {r4, r5, r6, r7}\n"	/* 16 bytes pushed: comp is now at [sp, #16] */
		"ldrd r4, [sp, #16]\n"		/* r4:r5 = comp */
		"dmb sy\n"			/* full barrier before the operation */
		"1:\n"
		"ldrexd r6, [r0]\n"		/* r6:r7 = *dest (load-exclusive) */
		"cmp r7, r5\n"			/* compare high words... */
		"cmpeq r6, r4\n"		/* ...then low words if they matched */
		"bne 2f\n"			/* *dest != comp: leave *dest alone */
		"strexd r1, r2, [r0]\n"		/* try *dest = exch (r2:r3); r1 = status */
		"cmp r1, #0\n"
		"bne 1b\n"			/* store-exclusive failed: retry */
		"2:\n"
		"dmb sy\n"			/* full barrier after the operation */
		"mov r0, r6\n"			/* return the old value in r0:r1 */
		"mov r1, r7\n"
		"pop {r4, r5, r6, r7}\n"
		"bx lr\n"
	);
}
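
/* For reference, the assembly above implements the same operation as
 * the builtin used on the other branches; roughly (a sketch, assuming
 * the intrinsic were reliably available on these Android systems):
 *
 *	gint64
 *	mono_atomic_cas_i64 (volatile gint64 *dest, gint64 exch, gint64 comp)
 *	{
 *		return __sync_val_compare_and_swap (dest, comp, exch);
 *	}
 */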

#else

#error "Need a 64-bit CAS fallback!"

#endif

#else

gint64
mono_atomic_cas_i64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	gint64 old;
	int ret;

	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
			      (void *)&spin);
	ret = pthread_mutex_lock(&spin);
	g_assert (ret == 0);

	old = *dest;
	if (old == comp) {
		*dest = exch;
	}

	ret = pthread_mutex_unlock(&spin);
	g_assert (ret == 0);

	pthread_cleanup_pop (0);

	return old;
}

#endif
#endif

#if !defined (WAPI_NO_ATOMIC_ASM) && !defined (BROKEN_64BIT_ATOMICS_INTRINSIC) && !defined (NEED_64BIT_CMPXCHG_FALLBACK)
MONO_EMPTY_SOURCE_FILE (atomic);
#endif