// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.
6 
7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
8 #define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
9 
10 #include "base/win/windows_types.h"
11 
12 #include <intrin.h>
13 
14 #include <atomic>
15 
16 #include "base/macros.h"
17 #include "build/build_config.h"
18 
19 namespace base {
20 namespace subtle {
21 
// Atomically compares *ptr with |old_value| and, on a match, stores
// |new_value|.  Returns the value *ptr held before the operation.  The
// _InterlockedCompareExchange intrinsic acts as a full barrier on MSVC, so
// this "no barrier" variant is in fact fully ordered here.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  volatile LONG* target = reinterpret_cast<volatile LONG*>(ptr);
  const LONG previous = _InterlockedCompareExchange(
      target, static_cast<LONG>(new_value), static_cast<LONG>(old_value));
  return static_cast<Atomic32>(previous);
}
31 
// Atomically stores |new_value| into *ptr and returns the value that was
// there beforehand.  The interlocked intrinsic is a full barrier on MSVC.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  volatile LONG* target = reinterpret_cast<volatile LONG*>(ptr);
  return static_cast<Atomic32>(
      _InterlockedExchange(target, static_cast<LONG>(new_value)));
}
39 
// Atomically adds |increment| to *ptr and returns the NEW (post-add) value.
// _InterlockedExchangeAdd returns the old value, hence the final addition.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  const LONG old_value = _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(increment));
  return static_cast<Atomic32>(old_value) + increment;
}
46 
// Same as Barrier_AtomicIncrement: the interlocked intrinsic it delegates to
// is already a full barrier on MSVC/x86, so no cheaper unordered form exists.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
51 
// Compare-and-swap with acquire semantics.  Delegates to the plain CAS
// because _InterlockedCompareExchange is already a full barrier, which
// subsumes acquire ordering.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
57 
// Compare-and-swap with release semantics.  Delegates to the plain CAS
// because _InterlockedCompareExchange is already a full barrier, which
// subsumes release ordering.
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
63 
// Plain (unordered) store.  The volatile qualifier on |ptr| keeps the
// compiler from eliding or reordering the access; no barrier is emitted.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
67 
// Store with acquire semantics.  Implemented as an atomic exchange (return
// value discarded) because the interlocked intrinsic supplies the barrier.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}
72 
// Store with release semantics.  A plain volatile store suffices on x86
// (stores are not reordered with older stores); see the comment below.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}
77 
// Plain (unordered) load.  The volatile read only constrains the compiler.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
81 
// Load with acquire semantics.  On x86 an ordinary load already has acquire
// ordering at the hardware level; the volatile read prevents compiler
// reordering, and the local temp keeps the load before the return.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}
86 
// Load preceded by a full (sequentially consistent) fence, so it cannot be
// reordered before any prior memory operation.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return *ptr;
}
91 
#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

// The 64-bit operations below reinterpret Atomic64* as PVOID*; this check
// guarantees the two types have the same size on this platform.
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
97 
// 64-bit compare-and-swap.  Uses the pointer-sized interlocked intrinsic,
// which is valid because the static_assert above guarantees
// sizeof(Atomic64) == sizeof(PVOID) on _WIN64.  Returns the prior *ptr.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  volatile PVOID* target = reinterpret_cast<volatile PVOID*>(ptr);
  PVOID previous = _InterlockedCompareExchangePointer(
      target, reinterpret_cast<PVOID>(new_value),
      reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(previous);
}
106 
// Atomically swaps |new_value| into *ptr and returns the prior contents.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  volatile PVOID* target = reinterpret_cast<volatile PVOID*>(ptr);
  return reinterpret_cast<Atomic64>(_InterlockedExchangePointer(
      target, reinterpret_cast<PVOID>(new_value)));
}
114 
// Atomically adds |increment| to *ptr and returns the NEW (post-add) value.
// _InterlockedExchangeAdd64 returns the old value, hence the final addition.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  const LONGLONG old_value = _InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment));
  return static_cast<Atomic64>(old_value) + increment;
}
121 
// Same as Barrier_AtomicIncrement: the interlocked intrinsic it delegates to
// is already a full barrier on MSVC/x86, so no cheaper unordered form exists.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
126 
// Plain (unordered) store.  The volatile qualifier on |ptr| keeps the
// compiler from eliding or reordering the access; no barrier is emitted.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
130 
// Store with acquire semantics.  Implemented as an atomic exchange (return
// value discarded) because the interlocked intrinsic supplies the barrier.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}
135 
// Store with release semantics.  A plain volatile store suffices on x86
// (stores are not reordered with older stores); see the reference below.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value; // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
146 
// Plain (unordered) load.  The volatile read only constrains the compiler.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
150 
// Load with acquire semantics.  On x86 an ordinary load already has acquire
// ordering at the hardware level; the volatile read prevents compiler
// reordering, and the local temp keeps the load before the return.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}
155 
// Load preceded by a full (sequentially consistent) fence, so it cannot be
// reordered before any prior memory operation.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return *ptr;
}
160 
// Compare-and-swap with acquire semantics.  Delegates to the plain CAS
// because the interlocked intrinsic it uses is already a full barrier.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
166 
// Compare-and-swap with release semantics.  Delegates to the plain CAS
// because the interlocked intrinsic it uses is already a full barrier.
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
172 
173 
174 #endif  // defined(_WIN64)
175 
176 }  // namespace subtle
177 }  // namespace base
178 
179 #endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
180