xref: /dragonfly/sys/sys/mplock2.h (revision 8a0bcd56)
1 /*
2  * SYS/MPLOCK2.H
3  *
 * Implement the MP lock.  Note that debug operations (file/line
 * tracking of acquisition sites) are compiled into the acquisition
 * macros below.
5  */
6 #ifndef _SYS_MPLOCK2_H_
7 #define _SYS_MPLOCK2_H_
8 
9 #ifndef _MACHINE_ATOMIC_H_
10 #include <machine/atomic.h>
11 #endif
12 #ifndef _SYS_THREAD_H_
13 #include <sys/thread.h>
14 #endif
15 #ifndef _SYS_GLOBALDATA_H_
16 #include <sys/globaldata.h>
17 #endif
18 
19 #ifdef SMP
20 
21 #define get_mplock()		get_mplock_debug(__FILE__, __LINE__)
22 #define try_mplock()		try_mplock_debug(__FILE__, __LINE__)
23 #define cpu_try_mplock()	cpu_try_mplock_debug(__FILE__, __LINE__)
24 
25 void _get_mplock_predisposed(const char *file, int line);
26 void _get_mplock_contested(const char *file, int line);
27 void _try_mplock_contested(const char *file, int line);
28 void _cpu_try_mplock_contested(const char *file, int line);
29 void _rel_mplock_contested(void);
30 void cpu_get_initial_mplock(void);
31 void handle_cpu_contention_mask(void);
32 void yield_mplock(struct thread *td);
33 
34 extern int mp_lock;
35 extern int cpu_contention_mask;
36 extern const char *mp_lock_holder_file;
37 extern int mp_lock_holder_line;
38 
39 /*
40  * Acquire the MP lock, block until we get it.
41  *
42  * In order to acquire the MP lock we must first pre-dispose td_mpcount
43  * for the acquisition and then get the actual lock.
44  *
45  * The mplock must check a number of conditions and it is better to
46  * leave it to a procedure if we cannot get it trivially.
47  *
48  * WARNING: The mp_lock and td_mpcount are not necessarily synchronized.
49  *	    We must synchronize them here.  They can be unsynchronized
50  *	    for a variety of reasons including predisposition, td_xpcount,
51  *	    and so forth.
52  */
53 static __inline
54 void
55 get_mplock_debug(const char *file, int line)
56 {
57 	globaldata_t gd = mycpu;
58 	thread_t td = gd->gd_curthread;
59 
60 	++td->td_mpcount;
61 	if (mp_lock != gd->gd_cpuid)
62 		_get_mplock_predisposed(file, line);
63 }
64 
65 /*
66  * Release the MP lock
67  *
68  * In order to release the MP lock we must first pre-dispose td_mpcount
69  * for the release and then, if it is 0 and td_xpcount is also zero,
70  * release the actual lock.
71  *
72  * The contested function is called only if we are unable to release the
 * actual lock.  This can occur if we raced an interrupt after decrementing
74  * td_mpcount to 0 and the interrupt acquired and released the lock.
75  *
76  * The function also catches the td_mpcount underflow case because the
77  * lock will be in a released state and thus fail the subsequent release.
78  *
79  * WARNING: The mp_lock and td_mpcount are not necessarily synchronized.
80  *	    We must synchronize them here.  They can be unsynchronized
81  *	    for a variety of reasons including predisposition, td_xpcount,
82  *	    and so forth.
83  */
84 static __inline
85 void
86 rel_mplock(void)
87 {
88 	globaldata_t gd = mycpu;
89 	thread_t td = gd->gd_curthread;
90 	int n;
91 
92 	n = --td->td_mpcount;
93 	if (n < 0 || ((n + td->td_xpcount) == 0 &&
94 		      atomic_cmpset_int(&mp_lock, gd->gd_cpuid, -1) == 0)) {
95 		_rel_mplock_contested();
96 	}
97 }
98 
99 /*
100  * Attempt to acquire the MP lock, returning 0 on failure and 1 on success.
101  *
102  * The contested function is called on failure and typically serves simply
103  * to log the attempt (if debugging enabled).
104  */
105 static __inline
106 int
107 try_mplock_debug(const char *file, int line)
108 {
109 	globaldata_t gd = mycpu;
110 	thread_t td = gd->gd_curthread;
111 
112 	++td->td_mpcount;
113 	if (mp_lock != gd->gd_cpuid &&
114 	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
115 		_try_mplock_contested(file, line);
116 		return(0);
117 	}
118 #ifdef INVARIANTS
119 	mp_lock_holder_file = file;
120 	mp_lock_holder_line = line;
121 #endif
122 	return(1);
123 }
124 
125 /*
 * Low level acquisition of the MP lock ignoring curthread->td_mpcount
127  *
128  * This version of try_mplock() is used when the caller has already
129  * predisposed td->td_mpcount.
130  *
131  * Returns non-zero on success, 0 on failure.
132  *
133  * WARNING: Must be called from within a critical section if td_mpcount is
 *	    zero, otherwise an interrupt race can cause the lock to be lost.
135  */
136 static __inline
137 int
138 cpu_try_mplock_debug(const char *file, int line)
139 {
140 	globaldata_t gd = mycpu;
141 
142 	if (mp_lock != gd->gd_cpuid &&
143 	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
144 		_cpu_try_mplock_contested(file, line);
145 		return(0);
146 	}
147 #ifdef INVARIANTS
148 	mp_lock_holder_file = file;
149 	mp_lock_holder_line = line;
150 #endif
151 	return(1);
152 }
153 
154 /*
155  * A cpu wanted the MP lock but could not get it.  This function is also
156  * called directly from the LWKT scheduler.
157  *
158  * Reentrant, may be called even if the cpu is already contending the MP
159  * lock.
160  */
161 static __inline
162 void
163 set_cpu_contention_mask(globaldata_t gd)
164 {
165 	atomic_set_int(&cpu_contention_mask, gd->gd_cpumask);
166 }
167 
168 /*
169  * A cpu is no longer contending for the MP lock after previously contending
170  * for it.
171  *
172  * Reentrant, may be called even if the cpu was not previously contending
173  * the MP lock.
174  */
175 static __inline
176 void
177 clr_cpu_contention_mask(globaldata_t gd)
178 {
179 	atomic_clear_int(&cpu_contention_mask, gd->gd_cpumask);
180 }
181 
182 static __inline
183 int
184 owner_mplock(void)
185 {
186 	return (mp_lock);
187 }
188 
189 /*
190  * Low level release of the MP lock ignoring curthread->td_mpcount
191  *
192  * WARNING: Caller must be in a critical section, otherwise the
193  *	    mp_lock can be lost from an interrupt race and we would
194  *	    end up clearing someone else's lock.
195  */
196 static __inline void
197 cpu_rel_mplock(int cpu)
198 {
199 	(void)atomic_cmpset_int(&mp_lock, cpu, -1);
200 }
201 
202 #define MP_LOCK_HELD(gd)			\
203 	(mp_lock == gd->gd_cpuid)
204 
205 #define ASSERT_MP_LOCK_HELD(td)			\
206 	KASSERT(MP_LOCK_HELD(td->td_gd),	\
207 		("MP_LOCK_HELD: Not held thread %p", td))
208 
209 #else
210 
211 /*
212  * UNI-PROCESSOR BUILD - Degenerate case macros
213  */
214 #define	get_mplock()
215 #define	rel_mplock()
216 #define try_mplock()		1
217 #define owner_mplock()		0
218 #define MP_LOCK_HELD(gd)	(!0)
219 #define ASSERT_MP_LOCK_HELD(td)
220 
221 #endif
222 
223 #endif
224