/*	$OpenBSD: atomic.h,v 1.6 2007/05/25 16:22:11 art Exp $	*/
/*	$NetBSD: atomic.h,v 1.1 2003/04/26 18:39:37 fvdl Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_ATOMIC_H
#define _AMD64_ATOMIC_H

/*
 * Perform atomic operations on memory. Should be atomic with respect
 * to interrupts and multiple processors.
 *
 * void atomic_setbits_int(volatile u_int *a, u_int mask) { *a |= mask; }
 * void atomic_clearbits_int(volatile u_int *a, u_int mask) { *a &= ~mask; }
 */
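
/*
 * Usage sketch (illustrative only; "flags" is a made-up variable for this
 * example, not something defined by this header):
 *
 *	volatile u_int flags = 0;
 *
 *	atomic_setbits_int(&flags, 0x01);	turn bit 0 on
 *	atomic_clearbits_int(&flags, 0x01);	turn bit 0 off again
 */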

#if defined(_KERNEL) && !defined(_LOCORE)

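/*
 * On MULTIPROCESSOR kernels the "lock" prefix makes the read-modify-write
 * instructions below atomic with respect to other CPUs.  On uniprocessor
 * kernels it is omitted; a single instruction is already atomic with
 * respect to interrupts on one CPU.  (xchg never needs it, since it is
 * implicitly locked.)
 */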
#ifdef MULTIPROCESSOR
#define LOCK "lock"
#else
#define LOCK
#endif
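/*
 * Atomically exchange *ptr with val and return the previous contents.
 * xchg with a memory operand is implicitly locked, so no LOCK prefix is
 * needed here.
 */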
static __inline u_int64_t
x86_atomic_testset_u64(volatile u_int64_t *ptr, u_int64_t val)
{
	__asm__ volatile ("xchgq %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
	return val;
}

static __inline u_int32_t
x86_atomic_testset_u32(volatile u_int32_t *ptr, u_int32_t val)
{
	__asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
	return val;
}

static __inline int32_t
x86_atomic_testset_i32(volatile int32_t *ptr, int32_t val)
{
	__asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
	return val;
}
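
/*
 * Example (illustrative only, not part of this header): a minimal
 * test-and-set spinlock built on the exchange primitive above.
 *
 *	volatile u_int32_t slock = 0;
 *
 *	while (x86_atomic_testset_u32(&slock, 1) != 0)
 *		continue;		spin until the old value was 0
 *	... critical section ...
 *	slock = 0;			release
 */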
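/*
 * Atomically set or clear bits in a 32-bit word: *ptr |= bits and
 * *ptr &= ~bits, respectively.
 */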
static __inline void
x86_atomic_setbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
	__asm __volatile(LOCK " orl %1,%0" :  "=m" (*ptr) : "ir" (bits));
}

static __inline void
x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
	__asm __volatile(LOCK " andl %1,%0" :  "=m" (*ptr) : "ir" (~bits));
}
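/*
 * Compare-and-swap: if *ptr == expect, store set into *ptr.  Returns the
 * value that was found in *ptr, so a return value equal to expect means
 * the swap took place.
 */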
static __inline u_long
x86_atomic_cas_ul(volatile u_long *ptr, u_long expect, u_long set)
{
	u_long res;

	__asm volatile(LOCK " cmpxchgq %2, %1" : "=a" (res), "=m" (*ptr)
	    : "r" (set), "a" (expect), "m" (*ptr) : "memory");

	return (res);
}
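
/*
 * Usage sketch (illustrative only; "counter" is a made-up variable):
 * a lock-free increment retried until the compare-and-swap succeeds.
 *
 *	volatile u_long counter;
 *	u_long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (x86_atomic_cas_ul(&counter, old, new) != old);
 */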
/*
 * XXX XXX XXX
 * A 64-bit value cannot be encoded as an "i" (immediate) operand of
 * orq/andq, so passing a constant with any bits set in the high dword
 * will produce an assembler error.
 */
static __inline void
x86_atomic_setbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
	__asm __volatile(LOCK " orq %1,%0" :  "=m" (*ptr) : "ir" (bits));
}

static __inline void
x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
	__asm __volatile(LOCK " andq %1,%0" :  "=m" (*ptr) : "ir" (~bits));
}

#define x86_atomic_testset_ul	x86_atomic_testset_u32
#define x86_atomic_testset_i	x86_atomic_testset_i32
#define x86_atomic_setbits_l	x86_atomic_setbits_u32
#define x86_atomic_setbits_ul	x86_atomic_setbits_u32
#define x86_atomic_clearbits_l	x86_atomic_clearbits_u32
#define x86_atomic_clearbits_ul	x86_atomic_clearbits_u32

#define atomic_setbits_int x86_atomic_setbits_u32
#define atomic_clearbits_int x86_atomic_clearbits_u32

#undef LOCK

#endif /* defined(_KERNEL) && !defined(_LOCORE) */
#endif /* _AMD64_ATOMIC_H */