/*	$OpenBSD: atomic.h,v 1.13 2022/08/29 02:01:18 jsg Exp $	*/

/*
 * Copyright (c) 2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#ifndef _POWERPC_ATOMIC_H_
#define _POWERPC_ATOMIC_H_

#if defined(_KERNEL)

/*
 * atomic_setbits_int(uip, v): atomically *uip |= v.
 *
 * Implemented as a lwarx/stwcx. load-reserve/store-conditional loop
 * that retries until the conditional store succeeds.  The trailing
 * "sync" instruction is a full memory barrier, so the update is
 * ordered against surrounding memory accesses.
 */
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* tmp = *uip, with reservation */
	    " or %0, %1, %0 \n"		/* tmp |= v */
	    " stwcx. %0, 0, %2 \n"	/* conditionally store tmp */
	    " bne- 1b \n"		/* reservation lost: retry */
	    " sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

/*
 * atomic_clearbits_int(uip, v): atomically *uip &= ~v.
 *
 * Same lwarx/stwcx. retry loop as atomic_setbits_int(), using "andc"
 * (AND with complement) to clear the requested bits.  The trailing
 * "sync" is a full memory barrier.
 */
static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* tmp = *uip, with reservation */
	    " andc %0, %0, %1 \n"	/* tmp &= ~v */
	    " stwcx. %0, 0, %2 \n"	/* conditionally store tmp */
	    " bne- 1b \n"		/* reservation lost: retry */
	    " sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

#endif /* defined(_KERNEL) */

/*
 * _atomic_cas_uint(p, o, n): compare-and-swap.  If *p == o, store n
 * into *p; in either case return the value *p held before the
 * operation.
 *
 * Note: no barrier instruction is issued here (unlike the
 * atomic_setbits_int()/atomic_clearbits_int() pair above); callers
 * that need ordering must use the membar_*() macros below.
 */
static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " cmpw 0, %0, %4 \n"	/* rv == o ? */
	    " bne- 2f \n"		/* no: done, return old value */
	    " stwcx. %3, 0, %2 \n"	/* yes: try to store n */
	    " bne- 1b \n"		/* reservation lost: retry */
	    "2: \n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))

/*
 * _atomic_cas_ulong(p, o, n): compare-and-swap for unsigned long.
 * If *p == o, store n into *p; return the previous value of *p.
 *
 * Uses the 32-bit lwarx/stwcx. pair, which matches unsigned long on
 * this ILP32 powerpc port.  No barrier instruction is issued; use
 * membar_*() when ordering is required.
 */
static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	unsigned long rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " cmpw 0, %0, %4 \n"	/* rv == o ? */
	    " bne- 2f \n"		/* no: done, return old value */
	    " stwcx. %3, 0, %2 \n"	/* yes: try to store n */
	    " bne- 1b \n"		/* reservation lost: retry */
	    "2: \n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

/*
 * _atomic_cas_ptr(pp, o, n): compare-and-swap for pointers.  If
 * *pp == o, store n into *pp; return the previous value of *pp.
 *
 * The local "void * volatile *p" re-types the generic volatile void *
 * argument so *p can appear as a proper memory operand.  Pointers are
 * 32 bits on this port, so the 32-bit lwarx/stwcx. pair applies.  No
 * barrier instruction is issued; use membar_*() when ordering is
 * required.
 */
static inline void *
_atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " cmpw 0, %0, %4 \n"	/* rv == o ? */
	    " bne- 2f \n"		/* no: done, return old value */
	    " stwcx. %3, 0, %2 \n"	/* yes: try to store n */
	    " bne- 1b \n"		/* reservation lost: retry */
	    "2: \n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))

/*
 * _atomic_swap_uint(p, v): atomically exchange *p with v; return the
 * value *p held before the exchange.  The store is unconditional
 * (no compare), retried only if the reservation is lost.  No barrier
 * instruction is issued; use membar_*() when ordering is required.
 */
static inline unsigned int
_atomic_swap_uint(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " stwcx. %3, 0, %2 \n"	/* try to store v */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))

/*
 * _atomic_swap_ulong(p, v): atomically exchange *p with v; return the
 * previous value.  unsigned long is 32 bits on this port, matching
 * the lwarx/stwcx. width.  No barrier instruction is issued.
 */
static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " stwcx. %3, 0, %2 \n"	/* try to store v */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))

/*
 * _atomic_swap_ptr(pp, v): atomically exchange the pointer at *pp
 * with v; return the previous pointer.  As in _atomic_cas_ptr(), the
 * local "void * volatile *p" re-types the argument for use as a
 * memory operand.  No barrier instruction is issued.
 */
static inline void *
_atomic_swap_ptr(volatile void *pp, void *v)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " stwcx. %3, 0, %2 \n"	/* try to store v */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ptr(_p, _v) _atomic_swap_ptr((_p), (_v))

/*
 * _atomic_add_int_nv(p, v): atomically *p += v; return the NEW value
 * (the "_nv" suffix).  No barrier instruction is issued; use
 * membar_*() when ordering is required.
 */
static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " add %0, %3, %0 \n"	/* rv += v */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");	/* NOTE(review): "xer" looks conservative for plain add */

	return (rv);
}
#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))

/*
 * _atomic_add_long_nv(p, v): atomically *p += v; return the new
 * value.  unsigned long is 32 bits on this port, matching the
 * lwarx/stwcx. width.  No barrier instruction is issued.
 */
static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " add %0, %3, %0 \n"	/* rv += v */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))

/*
 * _atomic_sub_int_nv(p, v): atomically *p -= v; return the new value.
 * "subf %0, %3, %0" computes rv = rv - v (subtract-from: rd = rb - ra).
 * No barrier instruction is issued.
 */
static inline unsigned int
_atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " subf %0, %3, %0 \n"	/* rv -= v */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))

/*
 * _atomic_sub_long_nv(p, v): atomically *p -= v; return the new
 * value.  unsigned long is 32 bits on this port, matching the
 * lwarx/stwcx. width.  No barrier instruction is issued.
 */
static inline unsigned long
_atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " subf %0, %3, %0 \n"	/* rv -= v */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))

/*
 * _atomic_addic_int_nv(p, v): atomically *p += v; return the new
 * value.  Uses "addic" (add immediate carrying): v has the "i"
 * (immediate) constraint, so it must be a compile-time constant --
 * this helper is only reached through the atomic_inc_int_nv() /
 * atomic_dec_int_nv() macros below, with +1 / -1.  addic updates the
 * carry bit, hence the "xer" clobber.  No barrier instruction is
 * issued.
 */
static inline unsigned int
_atomic_addic_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " addic %0, %0, %3 \n"	/* rv += v (immediate) */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "i" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_int_nv(_p) _atomic_addic_int_nv((_p), 1)
#define atomic_dec_int_nv(_p) _atomic_addic_int_nv((_p), -1)

/*
 * _atomic_addic_long_nv(p, v): unsigned long flavour of
 * _atomic_addic_int_nv() above; v must likewise be a compile-time
 * constant ("i" constraint) and is only supplied as +1 / -1 by the
 * atomic_inc_long_nv() / atomic_dec_long_nv() macros below.  No
 * barrier instruction is issued.
 */
static inline unsigned long
_atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1: lwarx %0, 0, %2 \n"	/* rv = *p, with reservation */
	    " addic %0, %0, %3 \n"	/* rv += v (immediate) */
	    " stwcx. %0, 0, %2 \n"	/* try to store rv */
	    " bne- 1b \n"		/* reservation lost: retry */
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "i" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
#define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)

/*
 * Memory barrier macros.  __membar() emits the given instruction
 * string inside an asm with a "memory" clobber, so every variant is
 * at least a compiler-level barrier even when the string is empty.
 */
#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)

#if defined(MULTIPROCESSOR) || !defined(_KERNEL)
/* MP kernel or userland: emit real barrier instructions. */
#define membar_enter() __membar("isync")
#define membar_exit() __membar("sync")
#define membar_producer() __membar("sync")
#define membar_consumer() __membar("isync")
#define membar_sync() __membar("sync")
#else
/* Uniprocessor kernel: a compiler barrier is sufficient. */
#define membar_enter() __membar("")
#define membar_exit() __membar("")
#define membar_producer() __membar("")
#define membar_consumer() __membar("")
#define membar_sync() __membar("")
#endif

#endif /* _POWERPC_ATOMIC_H_ */