/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//    * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use base/atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

namespace base {
namespace subtle {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
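
// Illustrative usage sketch (not part of the original interface): a typical
// compare-and-swap retry loop built on NoBarrier_CompareAndSwap. The helper
// name NoBarrier_StoreMax is hypothetical; it atomically raises *ptr to
// |candidate| and assumes plain atomicity with no ordering requirements.
//
//   inline void NoBarrier_StoreMax(volatile Atomic32* ptr, Atomic32 candidate) {
//     Atomic32 observed = *ptr;
//     while (observed < candidate) {
//       Atomic32 prev = NoBarrier_CompareAndSwap(ptr, observed, candidate);
//       if (prev == observed)
//         break;          // CAS succeeded; *ptr now holds candidate.
//       observed = prev;  // Lost a race; retry against the value we saw.
//     }
//   }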

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
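
// Illustrative usage sketch (not part of the original interface):
// NoBarrier_AtomicExchange can serve as a test-and-set primitive. The helper
// name TryAcquireFlag is hypothetical; because the exchange itself implies no
// ordering, the sketch adds MemoryBarrier() so the caller's critical section
// cannot float above the acquisition.
//
//   inline bool TryAcquireFlag(volatile Atomic32* flag) {
//     if (NoBarrier_AtomicExchange(flag, 1) != 0)
//       return false;   // Already held by someone else.
//     MemoryBarrier();  // Order the critical section after the acquisition.
//     return true;
//   }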

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %4\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}
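
// Illustrative usage sketch (not part of the original interface):
// Barrier_AtomicIncrement is the usual building block for reference counting,
// where the barriers keep the object's payload accesses from being reordered
// across the count update. The helper name ReleaseRef is hypothetical.
//
//   inline bool ReleaseRef(volatile Atomic32* refcount) {
//     // Returns true when the caller just dropped the last reference.
//     return Barrier_AtomicIncrement(refcount, -1) == 0;
//   }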

// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// Issues a full MIPS "sync" barrier; the "memory" clobber also makes it act
// as a compiler barrier.
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
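
// Illustrative usage sketch (not part of the original interface): the
// Release_Store/Acquire_Load pair above supports the classic publish/consume
// pattern. The names g_ready, g_data, Publish and TryConsume are hypothetical.
//
//   Atomic32 g_ready = 0;
//   int g_data = 0;
//
//   void Publish(int value) {
//     g_data = value;              // Plain write to the payload...
//     Release_Store(&g_ready, 1);  // ...made visible before the flag.
//   }
//
//   bool TryConsume(int* out) {
//     if (Acquire_Load(&g_ready) == 0)
//       return false;              // Not published yet.
//     *out = g_data;               // Safe: cannot be hoisted above the load.
//     return true;
//   }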

#if defined(__LP64__)
// 64-bit versions of the atomic ops.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %4\n"  // temp = *ptr
                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
#endif

}  // namespace base::subtle
}  // namespace base

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_