/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(tmp = *(u_int*)(P); *(u_int*)(P) = 0; return (tmp);)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(tmp = *(u_long*)(P); *(u_long*)(P) = 0; return (tmp);)
 */
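/*
 * Usage sketch (illustrative only; "flags", the bit value, and "old"
 * are hypothetical names, not part of this interface):
 *
 *	static volatile u_int flags;
 *	u_int old;
 *
 *	atomic_set_int(&flags, 0x04);		(set bit 2 atomically)
 *	atomic_clear_int(&flags, 0x04);		(clear bit 2 atomically)
 *	old = atomic_readandclear_int(&flags);	(fetch and zero in one step)
 */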

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
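/*
 * Expansion sketch (not an additional declaration): in a module,
 * ATOMIC_ASM(set, int, "orl %1,%0", "ir", v) reduces to the prototype
 *
 *	void atomic_set_int(volatile u_int *p, u_int v);
 *
 * so the module binds at load time to the function compiled into the
 * kernel, whichever of the UP or SMP variants that kernel contains.
 */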

#else /* !KLD_MODULE */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized to demarcate potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
struct __hack
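/*
 * Expansion sketch: with MPLOCKED defined to "lock ;",
 * ATOMIC_ASM(set, int, "orl %1,%0", "ir", v) produces
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; orl %1,%0" : "+m" (*p) : "ir" (v));
 *	}
 *
 * The trailing "struct __hack" exists only to consume the semicolon
 * written after each ATOMIC_ASM() invocation.
 */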

#else /* !(__GNUC__ || __INTEL_COMPILER) */

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* __GNUC__ || __INTEL_COMPILER */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == exp) *dst = src, as a single atomic 32-bit operation.
 *
 * Returns 0 on failure, non-zero on success.
 */
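/*
 * Typical use, sketched (the lock word "mtx_lock" here is hypothetical):
 *
 *	while (atomic_cmpset_int(&mtx_lock, 0, 1) == 0)
 *		;				(spin until 0 -> 1 wins)
 *	...critical section...
 *	atomic_store_rel_int(&mtx_lock, 0);	(release the lock)
 */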

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

#if defined(I386_CPU) || defined(CPU_DISABLE_CMPXCHG)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%0,%2 ;		"
	"	jne	1f ;		"
	"	movl	%1,%2 ;		"
	"1:				"
	"	sete	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#else /* defined(I386_CPU) */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#endif /* defined(I386_CPU) */

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

#if !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* defined(SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}							\
struct __hack

#endif	/* !defined(SMP) */

#else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#endif /* KLD_MODULE */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");
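/*
 * Pairing sketch for the acquire/release operations instantiated above
 * (all names here are hypothetical):
 *
 *	producer:	data = compute();
 *			atomic_store_rel_int(&ready, 1);
 *
 *	consumer:	while (atomic_load_acq_int(&ready) == 0)
 *				;
 *			consume(data);	(guaranteed to see the store to data)
 */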

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_long		atomic_cmpset_int
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_int
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_int

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}
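/*
 * Sketch of a once-only pointer installation built on the wrapper above
 * (all names hypothetical):
 *
 *	if (atomic_cmpset_ptr(&global_ptr, NULL, new_ptr) == 0)
 *		free_thing(new_ptr);	(lost the race; keep the winner's)
 */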

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	/*
	 * The apparently-bogus cast to intptr_t in the following is to
	 * avoid a warning from "gcc -Wbad-function-cast".
	 */
	return ((void *)(intptr_t)atomic_load_acq_int((volatile u_int *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}
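/*
 * Typical use, sketched ("pending" and handle() are hypothetical): drain
 * a word of event bits in one shot so that no concurrently-set bit is lost.
 *
 *	u_int ev = atomic_readandclear_int(&pending);
 *	for (; ev != 0; ev &= ev - 1)
 *		handle(ffs(ev));	(lowest set bit first)
 */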

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

#else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */

extern u_long	atomic_readandclear_long(volatile u_long *);
extern u_int	atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#endif	/* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */