/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP

#include "runtime/os.hpp"
#include "vm_version_arm.hpp"

// Implementation of class Atomic

/*
 * Atomic long operations on 32-bit ARM.
 * ARM v7 supports the LDREXD/STREXD synchronization instructions, so 64-bit
 * atomics are not a problem there.
 * ARM < v7 has no explicit 64-bit atomic load/store capability.
 * However, gcc emits LDRD/STRD instructions on v5te, and LDM/STM on v5t,
 * when loading/storing 64 bits.
 * For non-MP machines (which is all we support for ARM < v7)
 * under current Linux distros these instructions appear to be atomic.
 * See section A3.5.3 of the ARM Architecture Reference Manual for ARM v7.
 * Also, for cmpxchg64 on ARM < v7 we check for cmpxchg64 support in the
 * Linux kernel using __kuser_helper_version. See entry-armv.S in the Linux
 * kernel source or kernel_user_helpers.txt in the Linux documentation.
 */

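// The 64-bit load/store below, and the add/xchg/cmpxchg operations further
// down, all dispatch through os:: function pointers (os::atomic_load_long_func
// and friends) that are selected during VM initialization to match the
// detected CPU and kernel.
//
// A minimal sketch (not part of this header) of the kernel support check
// described above, assuming the fixed vector-page layout documented in
// kernel_user_helpers.txt; the address and version threshold are taken from
// that document, and the function name is hypothetical:
//
//   inline bool kernel_supports_cmpxchg64() {
//     // __kuser_helper_version sits at the top of the kernel vector page.
//     int version = *(volatile int*)0xffff0ffc;
//     // __kuser_cmpxchg64 is valid only when the helper version is >= 5.
//     return version >= 5;
//   }
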
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  return PrimitiveConversions::cast<T>(
    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  (*os::atomic_store_long_func)(
    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}

// As per atomic.hpp, all read-modify-write operations have to provide
// two-way barrier semantics.
//
// For ARMv7 we add explicit barriers in the stubs.

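// PlatformAdd supplies only add_and_fetch; the Atomic::AddAndFetch CRTP base
// class (see atomic.hpp) layers the public Atomic::add API on top of it.
// A hedged sketch of that pattern, simplified rather than quoted from
// atomic.hpp:
//
//   template<typename Derived>
//   struct AddAndFetch {
//     template<typename I, typename D>
//     D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
//       // Forward to the platform's add_and_fetch, which returns the
//       // updated value.
//       return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
//     }
//   };
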
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
}
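
// Hedged usage sketch (variable names hypothetical): Atomic::add returns the
// updated value, so a counter increment looks like:
//
//   volatile int _count = 0;
//   int after = Atomic::add(1, &_count);  // after == value of _count post-add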

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
}
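
// Hedged usage sketch for the 32-bit exchange (variable names hypothetical):
//
//   volatile int _flag = 0;
//   int before = Atomic::xchg(1, &_flag);  // returns the previous value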

// The memory_order parameter is ignored: we always provide the strongest
// (most conservative) ordering.

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
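
// CmpxchgByteUsingInt (defined in atomic.hpp) emulates the 1-byte cmpxchg by
// performing a 4-byte cmpxchg on the aligned word containing the byte,
// retrying if the surrounding bytes change concurrently.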

inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
                                    int32_t volatile* dest,
                                    int32_t compare_value) {
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
                                         int64_t volatile* dest,
                                         int64_t compare_value) {
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}
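
// Conceptually, both helpers behave like a strongly ordered compare-and-swap
// that returns the value observed at *dest. A hedged, roughly equivalent
// formulation using the GCC builtin (not what the stubs or kernel helpers
// actually execute):
//
//   int32_t observed = compare_value;
//   __atomic_compare_exchange_n(dest, &observed, exchange_value,
//                               false /* strong */,
//                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
//   return observed;  // equals compare_value iff the swap succeeded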

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
}
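
// Hedged usage sketch for the 64-bit case (variable names hypothetical). On
// ARM < v7 this is only safe once VM_Version::supports_cx8() holds, which the
// assert in reorder_cmpxchg_long_func checks in debug builds:
//
//   volatile int64_t _value = 0;
//   int64_t witnessed = Atomic::cmpxchg((int64_t)1, &_value, (int64_t)0);
//   bool swapped = (witnessed == 0);  // cmpxchg returns the value it saw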

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP