xref: /openbsd/sys/arch/amd64/include/atomic.h (revision 898184e3)
1 /*	$OpenBSD: atomic.h,v 1.9 2012/11/19 15:18:06 pirofti Exp $	*/
2 /*	$NetBSD: atomic.h,v 1.1 2003/04/26 18:39:37 fvdl Exp $	*/
3 
4 /*
5  * Copyright 2002 (c) Wasabi Systems, Inc.
6  * All rights reserved.
7  *
8  * Written by Frank van der Linden for Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed for the NetBSD Project by
21  *      Wasabi Systems, Inc.
22  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23  *    or promote products derived from this software without specific prior
24  *    written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #ifndef _MACHINE_ATOMIC_H_
40 #define _MACHINE_ATOMIC_H_
41 
42 /*
43  * Perform atomic operations on memory. Should be atomic with respect
44  * to interrupts and multiple processors.
45  *
46  * void atomic_setbits_int(volatile u_int *a, u_int mask) { *a |= mask; }
 * void atomic_clearbits_int(volatile u_int *a, u_int mask) { *a &= ~mask; }
48  */
49 
#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * LOCK expands to the x86 "lock" instruction prefix on MULTIPROCESSOR
 * kernels, making the following read-modify-write instruction atomic
 * across CPUs.  On uniprocessor kernels it expands to nothing: a single
 * instruction is already atomic with respect to interrupts, and omitting
 * the prefix avoids its bus-locking cost.  Undefined again at the bottom
 * of this header.
 */
#ifdef MULTIPROCESSOR
#define LOCK "lock"
#else
#define LOCK
#endif
57 
/*
 * Atomically exchange *ptr with val; returns the previous contents of *ptr.
 * xchg with a memory operand carries an implicit lock prefix, so no
 * explicit LOCK is needed even on MP kernels.
 */
static __inline u_int64_t
x86_atomic_testset_u64(volatile u_int64_t *ptr, u_int64_t val)
{
	/*
	 * "+m" (*ptr) makes the pointed-to memory an input AND an output,
	 * so the compiler knows *ptr is read and modified here.  The old
	 * form only passed the pointer value as an input ("r" (ptr)),
	 * which permitted the compiler to cache *ptr across the exchange.
	 */
	__asm__ volatile ("xchgq %0,%1" : "+r" (val), "+m" (*ptr));
	return val;
}
64 
/*
 * Atomically exchange *ptr with val; returns the previous contents of *ptr.
 * xchg with a memory operand carries an implicit lock prefix, so no
 * explicit LOCK is needed even on MP kernels.
 */
static __inline u_int32_t
x86_atomic_testset_u32(volatile u_int32_t *ptr, u_int32_t val)
{
	/*
	 * "+m" (*ptr): the memory is both read and written, which the old
	 * constraints ("r" (ptr) input only) failed to tell the compiler.
	 */
	__asm__ volatile ("xchgl %0,%1" : "+r" (val), "+m" (*ptr));
	return val;
}
71 
/*
 * Signed variant of x86_atomic_testset_u32: atomically exchange *ptr
 * with val and return the previous contents of *ptr.
 */
static __inline int32_t
x86_atomic_testset_i32(volatile int32_t *ptr, int32_t val)
{
	/* "+m" (*ptr): memory is read and written (see u32 variant). */
	__asm__ volatile ("xchgl %0,%1" : "+r" (val), "+m" (*ptr));
	return val;
}
78 
79 
80 
/*
 * Atomically set the bits in `bits` in *ptr: *ptr |= bits.
 */
static __inline void
x86_atomic_setbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
	/*
	 * orl is read-modify-write, so the memory operand must be "+m"
	 * (input and output); the old "=m" claimed write-only, letting the
	 * compiler assume the previous value was dead.  "cc": orl sets the
	 * flags.
	 */
	__asm __volatile(LOCK " orl %1,%0" : "+m" (*ptr) : "ir" (bits) : "cc");
}
86 
/*
 * Atomically clear the bits in `bits` in *ptr: *ptr &= ~bits.
 */
static __inline void
x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
	/*
	 * andl is read-modify-write: "+m" instead of the old write-only
	 * "=m".  "cc": andl sets the flags.  The complement is computed
	 * here, outside the asm, so the asm only needs a plain and.
	 */
	__asm __volatile(LOCK " andl %1,%0" : "+m" (*ptr) : "ir" (~bits) : "cc");
}
92 
/*
 * Compare-and-swap: if *ptr == expect, atomically store `set` in *ptr.
 * Returns the value *ptr held before the operation; the operation
 * succeeded iff the return value equals `expect`.
 */
static __inline int
x86_atomic_cas_int32(volatile int32_t *ptr, int32_t expect, int32_t set)
{
	int old;

	/*
	 * cmpxchg compares %eax (expect) with the memory operand and, on
	 * match, writes `set` there; either way %eax ends up holding the
	 * old memory contents, which we hand back to the caller.
	 */
	__asm volatile(LOCK " cmpxchgl %2, %1" : "=a" (old), "=m" (*ptr)
	    : "r" (set), "a" (expect), "m" (*ptr) : "memory");

	return (old);
}
103 
/*
 * 64-bit (u_long on amd64) compare-and-swap: if *ptr == expect,
 * atomically store `set` in *ptr.  Returns the value *ptr held before
 * the operation; success iff the return value equals `expect`.
 */
static __inline u_long
x86_atomic_cas_ul(volatile u_long *ptr, u_long expect, u_long set)
{
	u_long old;

	/* Same protocol as the 32-bit variant, using the quad-word form. */
	__asm volatile(LOCK " cmpxchgq %2, %1" : "=a" (old), "=m" (*ptr)
	    : "r" (set), "a" (expect), "m" (*ptr) : "memory");

	return (old);
}
114 
115 /*
116  * XXX XXX XXX
117  * theoretically 64bit cannot be used as
118  * an "i" and thus if we ever try to give
119  * these anything from the high dword there
120  * is an asm error pending
121  */
/*
 * Atomically set the bits in `bits` in *ptr: *ptr |= bits.
 */
static __inline void
x86_atomic_setbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
	/*
	 * "er" instead of "ir": orq only encodes 32-bit sign-extended
	 * immediates, and "i" would let a constant from the high dword
	 * through to the assembler, which then errors out.  "e" accepts
	 * exactly the immediates orq can take; anything wider falls back
	 * to the "r" alternative.  "+m": read-modify-write; "cc": orq
	 * sets the flags.
	 */
	__asm __volatile(LOCK " orq %1,%0" : "+m" (*ptr) : "er" (bits) : "cc");
}
127 
/*
 * Atomically clear the bits in `bits` in *ptr: *ptr &= ~bits.
 */
static __inline void
x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
	/*
	 * "er" instead of "ir": andq only encodes 32-bit sign-extended
	 * immediates; ~bits of almost any constant has high bits set, so
	 * "i" would routinely produce an unencodable immediate and an
	 * assembler error.  With "e" the compiler uses a register for
	 * anything that doesn't fit.  "+m": read-modify-write; "cc":
	 * andq sets the flags.
	 */
	__asm __volatile(LOCK " andq %1,%0" : "+m" (*ptr) : "er" (~bits) : "cc");
}
133 
/*
 * "natural width" aliases: on amd64, u_long and int are mapped onto the
 * 64-bit and 32-bit implementations respectively.
 */
#define x86_atomic_testset_ul	x86_atomic_testset_u64
#define x86_atomic_testset_i	x86_atomic_testset_i32
#define x86_atomic_setbits_ul	x86_atomic_setbits_u64
#define x86_atomic_clearbits_ul	x86_atomic_clearbits_u64

/* MI names used by the rest of the kernel (see the comment at the top). */
#define atomic_setbits_int x86_atomic_setbits_u32
#define atomic_clearbits_int x86_atomic_clearbits_u32

/* LOCK is private to this header; don't leak it to includers. */
#undef LOCK

#endif /* defined(_KERNEL) && !defined(_LOCORE) */
#endif /* _MACHINE_ATOMIC_H_ */
145 #endif /* _MACHINE_ATOMIC_H_ */
146