/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations appear to enforce ordering between memory   */
/* operations, EXCEPT that a later read can pass earlier writes,        */
/* presumably because of visible store buffers.                         */
/* We ignore the fact that the official specs seem to be much weaker    */
/* (and arguably too weak to be usable).                                */

#include "../ordered_except_wr.h"

#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"

AO_INLINE void
AO_nop_full(void)
{
  /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips.      */
  __asm__ __volatile__("mfence" : : : "memory");
}

#define AO_HAVE_nop_full
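
/* Usage sketch (illustrative only; my_flag and other_flag are
 * hypothetical shared AO_t variables): in a Dekker-style handshake the
 * full barrier keeps the later load from passing the earlier store, as
 * described above.
 *
 *   AO_store(&my_flag, 1);
 *   AO_nop_full();
 *   if (AO_load(&other_flag) == 0) {
 *     ... at most one of the two symmetric threads gets here ...
 *   }
 */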

/* As far as we can tell, the lfence and sfence instructions are not    */
/* currently needed or useful for cached memory accesses.               */

AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddq %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}

#define AO_HAVE_fetch_and_add_full
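
/* Usage sketch (illustrative only; counter is a hypothetical shared
 * AO_t): the old value is returned, so unique tickets can be handed out
 * with a single fully ordered instruction.
 *
 *   AO_t my_ticket = AO_fetch_and_add_full(&counter, 1);
 */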

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}

#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}

#define AO_HAVE_short_fetch_and_add_full

AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
                        : "memory");
  return result;
}

#define AO_HAVE_int_fetch_and_add_full
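
/* Usage sketch (illustrative only; refcount is a hypothetical shared
 * unsigned int): the sized variants work like AO_fetch_and_add_full but
 * on 8-, 16- and 32-bit objects, e.g. releasing a reference and
 * detecting the last drop.
 *
 *   if (AO_int_fetch_and_add_full(&refcount, (unsigned int)-1) == 1)
 *     ... last reference released ...
 */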

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orq %1, %0" :
                        "=m" (*p) : "r" (incr) /* , "m" (*p) */
                        : "memory");
}

#define AO_HAVE_or_full
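
/* Usage sketch (illustrative only; flags and bit_index are
 * hypothetical): AO_or_full sets bits atomically, so concurrent updates
 * to other bits in the same word are not lost.
 *
 *   AO_or_full(&flags, (AO_t)1 << bit_index);
 */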

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  AO_TS_t oldval;
  /* Note: "xchg" with a memory operand is implicitly locked; no        */
  /* "lock" prefix is needed.                                           */
  __asm__ __volatile__("xchg %0, %1"
                : "=q"(oldval), "=m"(*addr)
                : "0"(0xff) /* , "m"(*addr) */
                : "memory");
  return (AO_TS_VAL_t)oldval;
}

#define AO_HAVE_test_and_set_full
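
/* Usage sketch (illustrative only; lock is a hypothetical AO_TS_t
 * initialized to AO_TS_INITIALIZER): a minimal test-and-set spinlock.
 *
 *   while (AO_test_and_set_full(&lock) == AO_TS_SET) {
 *     ... spin or yield ...
 *   }
 *   ... critical section ...
 *   AO_CLEAR(&lock);
 */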

/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
                         AO_t old, AO_t new_val)
{
  char result;
  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
                       : "=m"(*addr), "=a"(result)
                       : "r" (new_val), "a"(old) : "memory");
  return (int) result;
}

#define AO_HAVE_compare_and_swap_full
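
/* Usage sketch (illustrative only; shared_max and candidate are
 * hypothetical AO_t values): the usual retry loop for a lock-free
 * update, here maintaining a running maximum.
 *
 *   AO_t cur;
 *   do {
 *     cur = AO_load(&shared_max);
 *     if (cur >= candidate) break;
 *   } while (!AO_compare_and_swap_full(&shared_max, cur, candidate));
 */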

#ifdef AO_CMPXCHG16B_AVAILABLE
/* NEC LE-IT: older AMD Opterons are missing this instruction.
 * On these machines SIGILL will be thrown.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock-based) version available. */
/* HB: Changed this to not define either by default.  There are
 * enough machines and tool chains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However, both are clearly useful in certain cases.
 */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m"(*addr), "=a"(result)
                       : "m"(*addr), "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#else
/* This one provides a spinlock-based emulation of double-width CAS,    */
/* implemented in atomic_ops.c.  We probably do not want to do this     */
/* here, since it is not atomic with respect to other kinds of updates  */
/* of *addr.  On the other hand, this may be a useful facility on       */
/* occasion.                                                            */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr,
                                                     old_val1, old_val2,
                                                     new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
#endif /* AO_CMPXCHG16B_AVAILABLE */
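
/* Usage sketch (illustrative only; stack_top and new_node are
 * hypothetical, with a pointer kept in AO_val1 and a version counter in
 * AO_val2): when AO_HAVE_compare_double_and_swap_double_full is defined
 * by one of the paths above, bumping the version tag together with the
 * pointer is the usual way to sidestep the ABA problem.  The plain read
 * of both halves may be torn, but the double-width CAS validates it.
 *
 *   AO_double_t cur;
 *   do {
 *     cur = stack_top;
 *   } while (!AO_compare_double_and_swap_double_full(&stack_top,
 *                cur.AO_val1, cur.AO_val2,
 *                (AO_t)new_node, cur.AO_val2 + 1));
 */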