1 /*
2 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3 * University Research and Technology
4 * Corporation. All rights reserved.
5 * Copyright (c) 2004-2005 The University of Tennessee and The University
6 * of Tennessee Research Foundation. All rights
7 * reserved.
8 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9 * University of Stuttgart. All rights reserved.
10 * Copyright (c) 2004-2005 The Regents of the University of California.
11 * All rights reserved.
 * Copyright (c) 2007      Sun Microsystems, Inc.  All rights reserved.
13 * Copyright (c) 2016 Research Organization for Information Science
14 * and Technology (RIST). All rights reserved.
15 * $COPYRIGHT$
16 *
17 * Additional copyrights may follow
18 *
19 * $HEADER$
20 */
21
22 #ifndef OPAL_SYS_ARCH_ATOMIC_H
23 #define OPAL_SYS_ARCH_ATOMIC_H 1
24
25 /*
26 * On sparc v9, use casa and casxa (compare and swap) instructions.
27 */
28
29 #define ASI_P "0x80"
30
31 #define MEMBAR(type) __asm__ __volatile__ ("membar " type : : : "memory")
32
33
34 /**********************************************************************
35 *
36 * Define constants for Sparc v9 (Ultra Sparc)
37 *
38 *********************************************************************/
39 #define OPAL_HAVE_ATOMIC_MEM_BARRIER 1
40
41 #define OPAL_HAVE_ATOMIC_CMPSET_32 1
42
43 #define OPAL_HAVE_ATOMIC_CMPSET_64 1
44
45
46 /**********************************************************************
47 *
48 * Memory Barriers
49 *
50 *********************************************************************/
51 #if OPAL_GCC_INLINE_ASSEMBLY
52
/* Full memory barrier: order all loads and stores (in both directions)
 * around this point.  Equivalent to MEMBAR() with every ordering bit set. */
static inline void opal_atomic_mb(void)
{
    __asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad"
                          : : : "memory");
}
57
58
/* Read memory barrier: no load may be reordered across this point
 * with respect to an earlier load. */
static inline void opal_atomic_rmb(void)
{
    __asm__ __volatile__ ("membar #LoadLoad" : : : "memory");
}
63
64
/* Write memory barrier: no store may be reordered across this point
 * with respect to an earlier store. */
static inline void opal_atomic_wmb(void)
{
    __asm__ __volatile__ ("membar #StoreStore" : : : "memory");
}
69
/* Instruction-stream synchronization: intentionally a no-op on SPARC v9
 * (no flush/sync is required here for this port). */
static inline void opal_atomic_isync(void)
{
}
73
74
75 #endif /* OPAL_GCC_INLINE_ASSEMBLY */
76
77
78 /**********************************************************************
79 *
80 * Atomic math operations
81 *
82 *********************************************************************/
83 #if OPAL_GCC_INLINE_ASSEMBLY
84
opal_atomic_cmpset_32(volatile int32_t * addr,int32_t oldval,int32_t newval)85 static inline int opal_atomic_cmpset_32( volatile int32_t *addr,
86 int32_t oldval, int32_t newval)
87 {
88 /* casa [reg(rs1)] %asi, reg(rs2), reg(rd)
89 *
90 * if (*(reg(rs1)) == reg(rs2) )
91 * swap reg(rd), *(reg(rs1))
92 * else
93 * reg(rd) = *(reg(rs1))
94 */
95
96 int32_t ret = newval;
97
98 __asm__ __volatile__("casa [%1] " ASI_P ", %2, %0"
99 : "+r" (ret)
100 : "r" (addr), "r" (oldval));
101 return (ret == oldval);
102 }
103
104
/* 32-bit compare-and-swap with acquire semantics: perform the CAS, then
 * fence subsequent loads so they cannot move before it. */
static inline int opal_atomic_cmpset_acq_32( volatile int32_t *addr,
                                             int32_t oldval, int32_t newval)
{
    int success = opal_atomic_cmpset_32(addr, oldval, newval);

    opal_atomic_rmb();
    return success;
}
115
116
/* 32-bit compare-and-swap with release semantics: drain pending stores
 * before the CAS becomes visible. */
static inline int opal_atomic_cmpset_rel_32( volatile int32_t *addr,
                                             int32_t oldval, int32_t newval)
{
    int success;

    opal_atomic_wmb();
    success = opal_atomic_cmpset_32(addr, oldval, newval);
    return success;
}
123
124
125 #if OPAL_ASSEMBLY_ARCH == OPAL_SPARCV9_64
126
opal_atomic_cmpset_64(volatile int64_t * addr,int64_t oldval,int64_t newval)127 static inline int opal_atomic_cmpset_64( volatile int64_t *addr,
128 int64_t oldval, int64_t newval)
129 {
130 /* casa [reg(rs1)] %asi, reg(rs2), reg(rd)
131 *
132 * if (*(reg(rs1)) == reg(rs1) )
133 * swap reg(rd), *(reg(rs1))
134 * else
135 * reg(rd) = *(reg(rs1))
136 */
137 int64_t ret = newval;
138
139 __asm__ __volatile__("casxa [%1] " ASI_P ", %2, %0"
140 : "+r" (ret)
141 : "r" (addr), "r" (oldval));
142 return (ret == oldval);
143 }
144
145 #else /* OPAL_ASSEMBLY_ARCH == OPAL_SPARCV9_64 */
146
opal_atomic_cmpset_64(volatile int64_t * addr,int64_t oldval,int64_t newval)147 static inline int opal_atomic_cmpset_64( volatile int64_t *addr,
148 int64_t oldval, int64_t newval)
149 {
150 /* casa [reg(rs1)] %asi, reg(rs2), reg(rd)
151 *
152 * if (*(reg(rs1)) == reg(rs1) )
153 * swap reg(rd), *(reg(rs1))
154 * else
155 * reg(rd) = *(reg(rs1))
156 *
157 */
158 long long ret = newval;
159
160 __asm__ __volatile__(
161 "ldx %0, %%g1 \n\t" /* g1 = ret */
162 "ldx %2, %%g2 \n\t" /* g2 = oldval */
163 "casxa [%1] " ASI_P ", %%g2, %%g1 \n\t"
164 "stx %%g1, %0 \n"
165 : "+m"(ret)
166 : "r"(addr), "m"(oldval)
167 : "%g1", "%g2"
168 );
169
170 return (ret == oldval);
171 }
172
173 #endif /* OPAL_ASSEMBLY_ARCH == OPAL_SPARCV9_64 */
174
/* 64-bit compare-and-swap with acquire semantics: perform the CAS, then
 * fence subsequent loads so they cannot move before it. */
static inline int opal_atomic_cmpset_acq_64( volatile int64_t *addr,
                                             int64_t oldval, int64_t newval)
{
    int success = opal_atomic_cmpset_64(addr, oldval, newval);

    opal_atomic_rmb();
    return success;
}
185
186
/* 64-bit compare-and-swap with release semantics: drain pending stores
 * before the CAS becomes visible. */
static inline int opal_atomic_cmpset_rel_64( volatile int64_t *addr,
                                             int64_t oldval, int64_t newval)
{
    int success;

    opal_atomic_wmb();
    success = opal_atomic_cmpset_64(addr, oldval, newval);
    return success;
}
193
194 #endif /* OPAL_GCC_INLINE_ASSEMBLY */
195
196
197 #endif /* ! OPAL_SYS_ARCH_ATOMIC_H */
198