/*
 * Copyright (c) 2006,2012-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The Cache Coherency Management System (CCMS)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/limits.h>

#include <sys/spinlock2.h>

#include "hammer2_ccms.h"
#include "hammer2.h"

int ccms_debug = 0;
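
/*
 * Locking convention used throughout this file: cst->count < 0 means
 * the lock is held exclusively by cst->td, with -count the recursion
 * depth; cst->count > 0 is the number of shared holders; zero means
 * unlocked.  cst->upgrade counts in-progress upgrades, which block new
 * acquisitions, and cst->blocked is set by sleepers so that a releasing
 * thread knows a wakeup() is required.
 */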

void
ccms_cst_init(ccms_cst_t *cst)
{
	bzero(cst, sizeof(*cst));
	spin_init(&cst->spin, "ccmscst");
}

void
ccms_cst_uninit(ccms_cst_t *cst)
{
	KKASSERT(cst->count == 0);
	if (cst->state != CCMS_STATE_INVALID) {
		/* XXX */
	}
}

/************************************************************************
 *			    CST SUPPORT FUNCTIONS			*
 ************************************************************************/

/*
 * Acquire local cache state & lock.  If the current thread already holds
 * the lock exclusively, we bump the exclusive count even if the thread is
 * trying to get a shared lock.
 */
void
ccms_thread_lock(ccms_cst_t *cst, ccms_state_t state)
{
	/*
	 * Regardless of the type of lock requested, if the current thread
	 * already holds an exclusive lock we bump the exclusive count and
	 * return.  This requires no spinlock.
	 */
	LOCKENTER;
	if (cst->count < 0 && cst->td == curthread) {
		--cst->count;
		return;
	}

	/*
	 * Otherwise use the spinlock to interlock the operation and sleep
	 * as necessary.
	 */
	hammer2_spin_ex(&cst->spin);
	if (state == CCMS_STATE_SHARED) {
		while (cst->count < 0 || cst->upgrade) {
			cst->blocked = 1;
			ssleep(cst, &cst->spin, 0, "ccmslck", hz);
		}
		++cst->count;
		KKASSERT(cst->td == NULL);
	} else if (state == CCMS_STATE_EXCLUSIVE) {
		while (cst->count != 0 || cst->upgrade) {
			cst->blocked = 1;
			ssleep(cst, &cst->spin, 0, "ccmslck", hz);
		}
		cst->count = -1;
		cst->td = curthread;
	} else {
		hammer2_spin_unex(&cst->spin);
		panic("ccms_thread_lock: bad state %d\n", state);
	}
	hammer2_spin_unex(&cst->spin);
}
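
/*
 * Illustrative usage sketch: because an exclusive holder recurses
 * regardless of the requested state, the following sequence is legal:
 *
 *	ccms_thread_lock(cst, CCMS_STATE_EXCLUSIVE);
 *	ccms_thread_lock(cst, CCMS_STATE_SHARED);  /* recurses exclusive */
 *	ccms_thread_unlock(cst);
 *	ccms_thread_unlock(cst);		   /* fully released */
 */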

/*
 * Same as ccms_thread_lock() but acquires the lock without blocking.
 * Returns 0 on success or EBUSY on failure.
 */
int
ccms_thread_lock_nonblock(ccms_cst_t *cst, ccms_state_t state)
{
	if (cst->count < 0 && cst->td == curthread) {
		--cst->count;
		LOCKENTER;
		return(0);
	}

	hammer2_spin_ex(&cst->spin);
	if (state == CCMS_STATE_SHARED) {
		if (cst->count < 0 || cst->upgrade) {
			hammer2_spin_unex(&cst->spin);
			return (EBUSY);
		}
		++cst->count;
		KKASSERT(cst->td == NULL);
	} else if (state == CCMS_STATE_EXCLUSIVE) {
		if (cst->count != 0 || cst->upgrade) {
			hammer2_spin_unex(&cst->spin);
			return (EBUSY);
		}
		cst->count = -1;
		cst->td = curthread;
	} else {
		hammer2_spin_unex(&cst->spin);
		panic("ccms_thread_lock_nonblock: bad state %d\n", state);
	}
	hammer2_spin_unex(&cst->spin);
	LOCKENTER;
	return(0);
}
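
/*
 * Illustrative usage sketch: a caller that cannot afford to sleep
 * falls back to some other strategy on EBUSY:
 *
 *	if (ccms_thread_lock_nonblock(cst, CCMS_STATE_EXCLUSIVE) == 0) {
 *		...do work...
 *		ccms_thread_unlock(cst);
 *	} else {
 *		...back off and retry later...
 *	}
 */

/*
 * Temporarily release the caller's lock, returning the state needed to
 * reacquire it with ccms_thread_lock_temp_restore().  Note that only
 * one reference is dropped, so a recursively-held exclusive lock
 * remains held.  Returns CCMS_STATE_INVALID if no lock was held.
 */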
ccms_state_t
ccms_thread_lock_temp_release(ccms_cst_t *cst)
{
	if (cst->count < 0) {
		ccms_thread_unlock(cst);
		return(CCMS_STATE_EXCLUSIVE);
	}
	if (cst->count > 0) {
		ccms_thread_unlock(cst);
		return(CCMS_STATE_SHARED);
	}
	return (CCMS_STATE_INVALID);
}

void
ccms_thread_lock_temp_restore(ccms_cst_t *cst, ccms_state_t ostate)
{
	ccms_thread_lock(cst, ostate);
}
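
/*
 * Illustrative usage sketch: drop the lock around a blocking operation
 * and reacquire it afterwards:
 *
 *	ostate = ccms_thread_lock_temp_release(cst);
 *	...potentially blocking operation...
 *	ccms_thread_lock_temp_restore(cst, ostate);
 */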

/*
 * Temporarily upgrade a thread lock for making local structural changes.
 * No new shared or exclusive locks can be acquired by others while we are
 * upgrading, but other upgraders are allowed.  Returns the previous lock
 * state, which must later be passed to ccms_thread_lock_downgrade() or
 * ccms_thread_unlock_upgraded().
 */
ccms_state_t
ccms_thread_lock_upgrade(ccms_cst_t *cst)
{
	/*
	 * Nothing to do if already exclusive
	 */
	if (cst->count < 0) {
		KKASSERT(cst->td == curthread);
		return(CCMS_STATE_EXCLUSIVE);
	}

	/*
	 * Convert a shared lock to exclusive.
	 */
	if (cst->count > 0) {
		hammer2_spin_ex(&cst->spin);
		++cst->upgrade;
		--cst->count;
		while (cst->count) {
			cst->blocked = 1;
			ssleep(cst, &cst->spin, 0, "ccmsupg", hz);
		}
		cst->count = -1;
		cst->td = curthread;
		hammer2_spin_unex(&cst->spin);
		return(CCMS_STATE_SHARED);
	}
	panic("ccms_thread_lock_upgrade: not locked");
	/* NOT REACHED */
	return(0);
}
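
/*
 * Illustrative usage sketch: pair the upgrade with a downgrade, or
 * with ccms_thread_unlock_upgraded() to release the lock outright:
 *
 *	ostate = ccms_thread_lock_upgrade(cst);
 *	...make structural changes...
 *	ccms_thread_lock_downgrade(cst, ostate);
 */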

void
ccms_thread_lock_downgrade(ccms_cst_t *cst, ccms_state_t ostate)
{
	if (ostate == CCMS_STATE_SHARED) {
		KKASSERT(cst->td == curthread);
		KKASSERT(cst->count == -1);
		hammer2_spin_ex(&cst->spin);
		--cst->upgrade;
		cst->count = 1;
		cst->td = NULL;
		if (cst->blocked) {
			cst->blocked = 0;
			hammer2_spin_unex(&cst->spin);
			wakeup(cst);
		} else {
			hammer2_spin_unex(&cst->spin);
		}
	}
	/* else nothing to do if excl->excl */
}

/*
 * Release a local thread lock
 */
void
ccms_thread_unlock(ccms_cst_t *cst)
{
	LOCKEXIT;
	if (cst->count < 0) {
		/*
		 * Exclusive
		 */
		KKASSERT(cst->td == curthread);
		if (cst->count < -1) {
			++cst->count;
			return;
		}
		hammer2_spin_ex(&cst->spin);
		KKASSERT(cst->count == -1);
		cst->count = 0;
		cst->td = NULL;
		if (cst->blocked) {
			cst->blocked = 0;
			hammer2_spin_unex(&cst->spin);
			wakeup(cst);
			return;
		}
		hammer2_spin_unex(&cst->spin);
	} else if (cst->count > 0) {
		/*
		 * Shared
		 */
		hammer2_spin_ex(&cst->spin);
		if (--cst->count == 0 && cst->blocked) {
			cst->blocked = 0;
			hammer2_spin_unex(&cst->spin);
			wakeup(cst);
			return;
		}
		hammer2_spin_unex(&cst->spin);
	} else {
		panic("ccms_thread_unlock: bad zero count\n");
	}
}

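/*
 * Set the current thread as owner of an exclusively held lock
 * (presumably for handing an exclusive lock off between threads).
 */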
void
ccms_thread_lock_setown(ccms_cst_t *cst)
{
	KKASSERT(cst->count < 0);
	cst->td = curthread;
}

/*
 * Release a previously upgraded local thread lock
 */
void
ccms_thread_unlock_upgraded(ccms_cst_t *cst, ccms_state_t ostate)
{
	if (ostate == CCMS_STATE_SHARED) {
		LOCKEXIT;
		KKASSERT(cst->td == curthread);
		KKASSERT(cst->count == -1);
		hammer2_spin_ex(&cst->spin);
		--cst->upgrade;
		cst->count = 0;
		cst->td = NULL;
		if (cst->blocked) {
			cst->blocked = 0;
			hammer2_spin_unex(&cst->spin);
			wakeup(cst);
		} else {
			hammer2_spin_unex(&cst->spin);
		}
	} else {
		ccms_thread_unlock(cst);
	}
}

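/*
 * Returns non-zero if the current thread holds the lock exclusively;
 * typically used in assertions.
 */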
int
ccms_thread_lock_owned(ccms_cst_t *cst)
{
	return(cst->count < 0 && cst->td == curthread);
}