/*	$OpenBSD: atomic.h,v 1.12 2019/10/28 09:41:37 visa Exp $	*/

/* Public Domain */

#ifndef _MIPS64_ATOMIC_H_
#define _MIPS64_ATOMIC_H_

#if defined(_KERNEL)

/*
 * Wait until the bits to set are clear, and set them.  This spins in
 * an ll/sc loop: if any requested bit is already set, or if the
 * store-conditional fails, restart from the load-linked.
 */
static __inline void
atomic_wait_and_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp0, tmp1;

	__asm__ volatile (
	"1:	ll	%0,	0(%2)\n"
	"	and	%1,	%0,	%3\n"
	"	bnez	%1,	1b\n"
	"	 or	%0,	%3,	%0\n"	/* branch delay slot */
	"	sc	%0,	0(%2)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp0), "=&r"(tmp1) :
		"r"(uip), "r"(v) : "memory");
}
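
/*
 * Usage sketch (illustrative, not from this file): take exclusive
 * ownership of a busy bit, spinning until any previous owner has
 * released it.  The structure and flag names are hypothetical.
 *
 *	atomic_wait_and_setbits_int(&sc->sc_flags, SCF_BUSY);
 *	... exclusive access ...
 *	atomic_clearbits_int(&sc->sc_flags, SCF_BUSY);
 */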

/* Atomically set the bits of v in *uip. */
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm__ volatile (
	"1:	ll	%0,	0(%1)\n"
	"	or	%0,	%2,	%0\n"
	"	sc	%0,	0(%1)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp) :
		"r"(uip), "r"(v) : "memory");
}

/* Atomically clear the bits of v in *uip (the inverted mask is ANDed in). */
static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm__ volatile (
	"1:	ll	%0,	0(%1)\n"
	"	and	%0,	%2,	%0\n"
	"	sc	%0,	0(%1)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp) :
		"r"(uip), "r"(~v) : "memory");
}
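
/*
 * Usage sketch (illustrative): post and acknowledge independent status
 * bits without a lock; unrelated bits in the same word are preserved.
 * The structure and flag names are hypothetical.
 *
 *	atomic_setbits_int(&ci->ci_ipending, IPI_DDB);		(post)
 *	...
 *	atomic_clearbits_int(&ci->ci_ipending, IPI_DDB);	(acknowledge)
 */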

#endif /* defined(_KERNEL) */

/*
 * Compare and swap: if *p equals o, atomically store n; in all cases
 * return the value of *p observed by the load-linked.
 */
static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int rv, wv;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))

/* 64-bit compare and swap, using the doubleword lld/scd pair. */
static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	unsigned long rv, wv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

/* Pointer compare and swap; pointers are 64 bits on mips64, hence lld/scd. */
static inline void *
_atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = pp;
	void *rv, *wv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
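
/*
 * Usage sketch (illustrative): the canonical lock-free update loop.
 * Read the current value, compute the new one, and retry if another
 * CPU changed the word in between; atomic_cas_uint() returns the value
 * it observed, so success means "observed == expected".  The variable
 * names are hypothetical.
 *
 *	unsigned int o, n;
 *
 *	do {
 *		o = *counterp;
 *		n = o + 1;
 *	} while (atomic_cas_uint(counterp, o, n) != o);
 */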

/* Atomically exchange *uip with v; returns the previous value. */
static inline unsigned int
_atomic_swap_uint(volatile unsigned int *uip, unsigned int v)
{
	unsigned int o, t;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	move	%2,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (v));

	return (o);
}
#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))

/* 64-bit exchange, using lld/scd. */
static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *uip, unsigned long v)
{
	unsigned long o, t;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (v));

	return (o);
}
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))

/* Pointer exchange; returns the previous pointer. */
static inline void *
_atomic_swap_ptr(volatile void *uipp, void *n)
{
	void * volatile *uip = uipp;
	void *o, *t;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (n));

	return (o);
}
#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
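
/*
 * Usage sketch (illustrative): atomically detach a singly-consumed
 * pointer, e.g. claim a pending work item so that only one thread
 * handles it.  The names are hypothetical.
 *
 *	struct task *t;
 *
 *	t = atomic_swap_ptr(&pending_task, NULL);
 *	if (t != NULL)
 *		run_task(t);
 */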

/* Atomically add v to *uip; returns the new value. */
static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
{
	unsigned int rv, nv;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	addu	%2,	%0,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (rv), "+m" (*uip), "=&r" (nv)
	    : "Ir" (v));

	return (rv + v);
}
#define atomic_add_int_nv(_uip, _v) _atomic_add_int_nv((_uip), (_v))
#define atomic_sub_int_nv(_uip, _v) _atomic_add_int_nv((_uip), 0 - (_v))

/* 64-bit add, using lld/scd and daddu; returns the new value. */
static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
{
	unsigned long rv, nv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	daddu	%2,	%0,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (rv), "+m" (*uip), "=&r" (nv)
	    : "Ir" (v));

	return (rv + v);
}
#define atomic_add_long_nv(_uip, _v) _atomic_add_long_nv((_uip), (_v))
#define atomic_sub_long_nv(_uip, _v) _atomic_add_long_nv((_uip), 0UL - (_v))
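
/*
 * Usage sketch (illustrative): reference counting with the _nv
 * ("new value") forms, which return the post-operation value so the
 * caller can detect the final release.  The names are hypothetical.
 *
 *	atomic_add_int_nv(&obj->refcnt, 1);		(take a reference)
 *	...
 *	if (atomic_sub_int_nv(&obj->refcnt, 1) == 0)
 *		free_obj(obj);				(last reference)
 */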

#endif /* _MIPS64_ATOMIC_H_ */