xref: /linux/arch/arc/include/asm/cmpxchg.h (revision 44f57d78)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}
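
/*
 * C-level sketch of the LLOCK/SCOND retry loop above (illustrative
 * only; the real sequence is atomic in hardware). store_conditional()
 * here is a hypothetical stand-in for the SCOND instruction, which
 * fails if the location may have been written since the matching LLOCK:
 *
 *	do {
 *		prev = *(unsigned long *)ptr;		// llock
 *		if (prev != expected)
 *			break;				// brne: no match, bail
 *	} while (!store_conditional(ptr, new));		// scond + bnz
 */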

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}
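
/*
 * Semantics of the custom CTOP_INST_EXC_DI_R2_R2_R3 sequence above,
 * as inferred from the operand setup (an illustrative sketch, not a
 * statement from the EZNPS documentation): with r3 = ptr, r2 = new
 * and the expected value in CTOP_AUX_GPA1, the instruction atomically
 * compares *r3 with CTOP_AUX_GPA1, stores r2 on a match, and leaves
 * the previous memory value in r2, which is handed back via "new".
 */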

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

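/*
 * Typical caller pattern (an illustrative sketch, not part of the
 * original header): retry the CAS until it wins, e.g. a lock-free
 * increment of *p:
 *
 *	unsigned long old, prev;
 *
 *	old = READ_ONCE(*p);
 *	while ((prev = cmpxchg(p, old, old + 1)) != old)
 *		old = prev;
 */
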
/*
 * atomic_cmpxchg() is the same as cmpxchg():
 *   LLSC: only the data-type differs; the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use an external lock, atomic_ops_lock, to
 *         guarantee the semantics, and this lock also happens to be used
 *         by the atomic_*() ops
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))


#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))
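
/*
 * EX semantics sketch (illustrative): the single instruction atomically
 * performs
 *	tmp = *ptr; *ptr = val; val = tmp;
 * so the caller always gets the previous memory value back.
 */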

/*
 * xchg() maps directly to the ARC EX instruction, which guarantees
 * atomicity. However, in the !LLSC config it also needs to take the
 * @atomic_ops_lock spinlock, for a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above), and there
 *    is a lot of kernel code which calls xchg()/cmpxchg() on the same
 *    data (see llist.h), so xchg() needs to follow the same locking
 *    rules; an example follows this comment.
 *
 * Technically the lock is also needed for UP (where it boils down to an
 * IRQ save/restore), but we can cheat a bit: cmpxchg()'s
 * atomic_ops_lock() disables IRQs, so it can't possibly be
 * interrupted/preempted/clobbered by xchg(). The other way around,
 * xchg() is a single instruction anyway, so it can't be interrupted
 * as such.
 */

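/*
 * Illustrative sketch of the mixed usage mentioned above (llist.h
 * style; the variable names are made up): one CPU consumes the whole
 * list with xchg() while another pushes entries with a cmpxchg() loop:
 *
 *	first = xchg(&head->first, NULL);
 *
 *	do {
 *		entry->next = old = head->first;
 *	} while (cmpxchg(&head->first, old, entry) != old);
 *
 * Without a common lock in the !LLSC case, the EX-based xchg() could
 * slip in between cmpxchg()'s load and store, and its update would be
 * lost.
 */
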
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ({				\
	(typeof(*(ptr)))__xchg((unsigned long)(with),	\
			       (ptr),			\
			       sizeof(*(ptr)));		\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * ops. Since xchg() doesn't always do that, the following definition would
 * seem incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even on SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : Other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them; atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others either. Thus
 *         no serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
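
/*
 * Example (illustrative only; the names are made up): atomically claim
 * a flag word, where the returned old value tells us who won the race:
 *
 *	static atomic_t busy = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&busy, 1) == 0)
 *		...we set it first; do the work, then atomic_set(&busy, 0);
 */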

#endif