/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

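// All of the primitives below are implemented with inline assembly.  On
// x86, locked instructions (and xchg with a memory operand, which is
// implicitly locked) act as full two-way memory barriers, so every
// atomic_memory_order argument can safely be ignored.

// PlatformAdd derives from the shared FetchAndAdd helper (CRTP): the
// base class supplies Atomic::add()'s add-and-fetch semantics in terms
// of the fetch_and_add() primitive defined below (see atomic.hpp).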
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
};

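// 4-byte fetch-and-add.  "lock xaddl" atomically exchanges %0 with (%2)
// and stores their sum back into (%2); the "0" constraint starts
// add_value in the output register, which afterwards holds the previous
// value of *dest.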
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}

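// 4-byte exchange.  xchg with a memory operand is implicitly locked, so
// no "lock" prefix is needed; the previous contents of *dest are
// returned in the register that supplied exchange_value.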
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

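// 1-byte compare-and-swap.  cmpxchgb compares %al (compare_value) with
// *dest: on a match it stores exchange_value, otherwise it loads *dest
// into %al.  Either way %al ends up holding the old value of *dest,
// which is what this operator must return.  The "q" constraint keeps
// exchange_value in a byte-addressable register.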
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

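// 4-byte compare-and-swap; the same protocol as the 1-byte version,
// with %eax in place of %al.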
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
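// On AMD64 the 8-byte primitives are the same instruction sequences as
// their 4-byte counterparts, using the "q" (quadword) forms.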
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

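// 8-byte exchange via implicitly-locked xchgq.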
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

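// 8-byte compare-and-swap via lock cmpxchgq.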
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

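// 32-bit x86 has no 8-byte xadd/xchg, and plain 8-byte loads and stores
// are not guaranteed atomic, so the 64-bit operations are routed through
// out-of-line helpers in bsd_x86.s (lock cmpxchg8b for the
// compare-and-swap and an atomic 64-bit move for load/store).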
extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

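// 8-byte compare-and-swap on 32-bit x86.  cmpxchg_using_helper<int64_t>
// converts T to int64_t, calls the assembly stub, and converts the
// result back to T.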
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

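// 8-byte atomic load on 32-bit x86: a plain 64-bit load could tear, so
// the value is moved through _Atomic_move_long into a local and then
// cast to the caller's type.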
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

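// 8-byte atomic store, mirroring the load above.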
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP