// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value, whereas
//    C11 fetch_add returns the previous value. The implementation therefore
//    adds the increment to the fetched value before returning it (which the
//    compiler should be able to detect and optimize).
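//
// A minimal usage sketch of the API this file implements (illustrative only;
// |shared_data| and |ready_flag| are hypothetical caller-owned Atomic32
// variables, not part of this header). A Release_Store paired with an
// Acquire_Load publishes a plain relaxed write to another thread:
//
//   // Producer thread: write the payload, then publish it.
//   base::subtle::NoBarrier_Store(&shared_data, 42);
//   base::subtle::Release_Store(&ready_flag, 1);
//
//   // Consumer thread: once the flag is observed, the payload is visible.
//   if (base::subtle::Acquire_Load(&ready_flag) == 1) {
//     Atomic32 value = base::subtle::NoBarrier_Load(&shared_data);  // Sees 42.
//   }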

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; if it fires, this header shouldn't be
// used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

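// compare_exchange_strong() loads the current value into |old_value| when the
// comparison fails, so returning |old_value| yields the previous contents of
// |*ptr| whether or not the swap happened.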
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

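// Both increment variants below return the post-incremented value, while
// std::atomic's fetch_add() returns the previous value, so |increment| is
// added back to the result (see the file comment above).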
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

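// fetch_add() with no explicit ordering defaults to memory_order_seq_cst,
// which gives the Barrier variant its sequentially consistent semantics.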
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

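// The failure ordering is relaxed rather than release: a failed compare
// exchange only performs a load, and release is not a valid ordering for the
// failure case (see the file comment's note on compare exchange).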
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

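// "Acquire store" has no C11 equivalent; per the file comment it is a relaxed
// store followed by a sequentially consistent fence.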
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

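// "Release load" has no C11 equivalent; per the file comment it is a
// sequentially consistent fence followed by a relaxed load.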
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(ARCH_CPU_64_BITS)

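// The 64-bit operations below mirror the 32-bit implementations above and use
// the same memory-ordering mapping.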
typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // defined(ARCH_CPU_64_BITS)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_