/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.12 2008/06/04 04:34:54 nth Exp $
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif

#ifdef SMP

extern int spin_trylock_wr_contested2(globaldata_t gd);
extern void spin_lock_wr_contested2(struct spinlock *mtx);

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	globaldata_t gd = mycpu;
	int value;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
	if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0)
		return (spin_trylock_wr_contested2(gd));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = mtx;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
						__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}

#else

static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
	return (TRUE);
}

#endif
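
/*
 * Illustrative usage sketch (not part of this header): a caller that
 * polls with spin_trylock() and backs off when the lock is contested.
 * "struct mydata" and its fields are hypothetical names used only for
 * this example.
 *
 *	static int
 *	mydata_try_update(struct mydata *md, int v)
 *	{
 *		if (spin_trylock(&md->md_spin) == FALSE)
 *			return (EBUSY);
 *		md->md_value = v;
 *		spin_unlock(&md->md_spin);
 *		return (0);
 *	}
 */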

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
	int value;
#endif

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
#ifdef SMP
	if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0)
		spin_lock_wr_contested2(mtx);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = mtx;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
#endif
}

static __inline void
spin_lock(struct spinlock *mtx)
{
	spin_lock_quick(mycpu, mtx);
}
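
/*
 * Illustrative sketch (not part of this header): code which already has
 * its globaldata_t in hand can pass it to spin_lock_quick() and
 * spin_unlock_quick() directly, avoiding the second mycpu lookup that
 * spin_lock()/spin_unlock() would perform.  "struct mystate" and its
 * fields are hypothetical names used only for this example.
 *
 *	static void
 *	mystate_bump(struct mystate *ms)
 *	{
 *		globaldata_t gd = mycpu;
 *
 *		spin_lock_quick(gd, &ms->ms_spin);
 *		++ms->ms_count;
 *		spin_unlock_quick(gd, &ms->ms_spin);
 *	}
 */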

/*
 * Release an exclusive spinlock.  The release is passive; we only have
 * to make sure our per-cpu spinlock count remains in place until the
 * lock word itself has been cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == mtx)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	mtx->lock = 0;
#endif
	KKASSERT(gd->gd_spinlocks_wr > 0);
	--gd->gd_spinlocks_wr;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mycpu, mtx);
}
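
/*
 * Illustrative sketch (not part of this header): the usual pattern of
 * bracketing a short, non-blocking critical section with spin_lock()
 * and spin_unlock().  "struct myq", "struct myelm" and their fields are
 * hypothetical names used only for this example.
 *
 *	static void
 *	myq_insert(struct myq *q, struct myelm *elm)
 *	{
 *		spin_lock(&q->q_spin);
 *		TAILQ_INSERT_TAIL(&q->q_list, elm, q_entry);
 *		spin_unlock(&q->q_spin);
 *	}
 */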

static __inline void
spin_init(struct spinlock *mtx)
{
	mtx->lock = 0;
}

static __inline void
spin_uninit(struct spinlock *mtx)
{
	/* unused */
}
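
/*
 * Illustrative sketch (not part of this header): a spinlock embedded in
 * an object should be initialized with spin_init() before first use and
 * may be torn down with spin_uninit() when the object is destroyed.
 * "struct myobj" and its fields are hypothetical names used only for
 * this example.
 *
 *	static void
 *	myobj_ctor(struct myobj *obj)
 *	{
 *		bzero(obj, sizeof(*obj));
 *		spin_init(&obj->obj_spin);
 *	}
 *
 *	static void
 *	myobj_dtor(struct myobj *obj)
 *	{
 *		spin_uninit(&obj->obj_spin);
 *	}
 */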

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */