/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
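
/*
 * Only relaxed arithmetic variants are defined in this file; the
 * generic layer in <linux/atomic.h> composes the acquire, release and
 * fully-ordered forms from them using the fences above. Roughly (a
 * sketch of the generated fallbacks, not the literal code):
 *
 *	atomic_add_return_acquire(a, v):
 *		ret = atomic_add_return_relaxed(a, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *
 *	atomic_add_return_release(a, v):
 *		__atomic_release_fence();
 *		return atomic_add_return_relaxed(a, v);
 */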

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}

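/*
 * All of the read-modify-write routines below follow the same
 * load-reserve/store-conditional pattern: lwarx loads the word and
 * establishes a reservation, the new value is computed, and stwcx.
 * stores it only if the reservation still stands. If another CPU
 * wrote to the reservation granule in the meantime, stwcx. fails
 * (CR0.EQ clear) and "bne-" retries the sequence.
 */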
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
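
/*
 * For reference, ATOMIC_OPS(add, add) expands to atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed(). The
 * subtraction variants reuse the same template with "subf", which
 * computes rB - rA, so "subf %0,%2,%0" yields counter - a.
 */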

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

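/*
 * The increment/decrement routines use addic rather than addi: addi
 * treats rA == r0 as the constant zero, which would break if the
 * register allocator picked r0, while addic always reads the register.
 * addic also updates the carry bit, hence "xer" in the clobber list.
 */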
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants, so it
 * would be a surprise).
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_lock	\n"
"	cmpw	0,%0,%3						\n"
"	bne-	2f						\n"
"	stwcx.	%4,0,%2						\n"
"	bne-	1b						\n"
"\t"	PPC_ACQUIRE_BARRIER "					\n"
"2:								\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
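
/*
 * A hedged sketch of the intended caller, modelled on the qspinlock
 * fast path (see asm/qspinlock.h for the real thing): claim a lock
 * word expected to be zero, passing the observed value to the slow
 * path on failure.
 *
 *	int val = 0;
 *
 *	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
 *		return;
 *	queued_spin_lock_slowpath(lock, val);
 */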

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
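
/*
 * The generic layer builds atomic_add_unless() on top of this; roughly
 * (a sketch of the fallback in <linux/atomic.h>, not the literal code):
 *
 *	static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return atomic_fetch_add_unless(v, a, u) != u;
 *	}
 */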

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
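
/*
 * A typical use is a lookup path that takes a reference only while the
 * object is still live; a hedged sketch, where "obj" and its refcnt
 * field are hypothetical:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	- the last reference is already gone
 *	return obj;
 */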

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
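
/*
 * Because old-minus-one is returned even when nothing was stored,
 * callers test the sign of the result rather than comparing it with
 * the old counter. A minimal sketch, assuming a hypothetical
 * semaphore-style counter:
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		wait_for_resource();	- counter was already <= 0
 */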

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }
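
/*
 * The 64-bit operations below mirror their 32-bit counterparts exactly,
 * substituting ld/std for lwz/stw, ldarx/stdcx. for lwarx/stwcx., and
 * doubleword compares (cmpd/cmpdi) for the word forms.
 */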

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
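
/*
 * Unlike the 32-bit version, this folds the decrement and the sign
 * test into one instruction: the record form addic. sets CR0 from the
 * result, so "blt-" skips the store when the result went negative,
 * i.e. when the old value was already <= 0.
 */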

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */