/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/core_ops_gcc_sync.hpp
 *
 * This header contains the implementation of the \c core_operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

struct core_operations_gcc_sync_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

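    // Note: the bit tests in the fences below rely on Boost's memory_order constants (see boost/memory_order.hpp)
    // encoding the acquire and release requirements as distinct bits, so that memory_order_release,
    // memory_order_acq_rel and memory_order_seq_cst all test positive for the release bit.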
    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }

    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
            __sync_synchronize();
    }
};

template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations_gcc_sync :
    public core_operations_gcc_sync_base
{
    typedef typename storage_traits< Size >::type storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< storage_size >::alignment;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
    static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;

    // In general, we cannot guarantee atomicity of plain loads and stores of anything larger than a single byte on
    // an arbitrary CPU architecture. However, all modern architectures seem to guarantee atomic loads and stores of
    // suitably aligned objects of up to a pointer size. For larger objects we fall back to CAS-based intrinsics to
    // guarantee atomicity. If an architecture appears where this assumption doesn't hold, this threshold needs to
    // be updated (patches are welcome).
    typedef atomics::detail::integral_constant< bool, storage_size <= sizeof(void*) > plain_stores_loads_are_atomic;
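    // For example, on a typical 64-bit target (sizeof(void*) == 8) this constant is true for the 1-, 2-, 4- and
    // 8-byte instantiations and false for the 16-byte one, which then routes store() and load() through the
    // CAS-based overloads below.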

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        store(storage, v, order, plain_stores_loads_are_atomic());
    }

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::false_type) BOOST_NOEXCEPT
    {
        exchange(storage, v, order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return load(storage, order, plain_stores_loads_are_atomic());
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order, atomics::detail::false_type) BOOST_NOEXCEPT
    {
        // Note: don't use fetch_add or other arithmetics here since storage_type may not be an arithmetic type.
        storage_type expected = storage_type();
        storage_type desired = expected;
        // We don't care whether the CAS succeeds or not. If it does, it merely writes back the value that was
        // already there. Either way, the intrinsic returns the current value of the storage.
        return __sync_val_compare_and_swap(const_cast< storage_type volatile* >(&storage), expected, desired);
    }

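    // The memory_order arguments are ignored in the __sync_fetch_and_* and CAS operations below because GCC
    // documents those builtins as full memory barriers. Only __sync_lock_test_and_set and __sync_lock_release
    // have weaker (acquire-only and release-only, respectively) semantics and need explicit fences.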
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_add(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_sub(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
        // std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures turn out not to support it, we can always
        // add a check here and fall back to a CAS loop.
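        // Note: per the GCC docs, __sync_lock_test_and_set is only an acquire barrier, so the release part of the
        // requested ordering has to be provided by an explicit fence.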
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return __sync_lock_test_and_set(&storage, v);
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type expected2 = expected;
        storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);

        if (old_val == expected2)
        {
            return true;
        }
        else
        {
            expected = old_val;
            return false;
        }
    }

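    // __sync_val_compare_and_swap cannot fail spuriously, so the weak form simply forwards to the strong one.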
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_and(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_or(&storage, v);
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return __sync_fetch_and_xor(&storage, v);
    }

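    // As in exchange(): __sync_lock_test_and_set is only an acquire barrier, so a release fence is issued
    // explicitly for the release, acq_rel and seq_cst orders. The builtin returns the previous value of the
    // storage, hence the !! conversion to bool.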
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return !!__sync_lock_test_and_set(&storage, 1);
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
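        // __sync_lock_release stores zero with release semantics; the trailing full fence upgrades the
        // operation to seq_cst when requested.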
        __sync_lock_release(&storage);
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }
};

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
    public core_operations_gcc_sync< 1u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 2u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 1u, Signed >
#else
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 1u, Signed >
#endif
{
};
#endif
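
// The specializations above and below pick the smallest storage size for which the target provides a __sync CAS
// and, when the requested size is smaller than that, emulate it via extending_cas_based_arithmetic. As an
// illustrative expansion (assuming a hypothetical target that only defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4),
// the 1-byte specialization reduces to:
//
//     struct core_operations< 1u, Signed, Interprocess > :
//         public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 1u, Signed >
//     {
//     };
//
// i.e. 8-bit atomics are implemented on top of the 32-bit CAS.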

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
    public core_operations_gcc_sync< 2u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 2u, Signed >
#else
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 2u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    public core_operations_gcc_sync< 4u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 4u, Signed >
#else
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 4u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
    public core_operations_gcc_sync< 8u, Signed, Interprocess >
#else
    public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 8u, Signed >
#endif
{
};
#endif

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 16u, Signed, Interprocess > :
    public core_operations_gcc_sync< 16u, Signed, Interprocess >
{
};
#endif

} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_