/*	$NetBSD: locks.c,v 1.72 2016/01/26 23:12:17 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.72 2016/01/26 23:12:17 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)		\
    lockdebug_barrier(lock, slp)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#define BARRIER(a, b)
#endif
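
/*
 * Without LOCKDEBUG the macros above expand to nothing, so the
 * bookkeeping calls sprinkled through the lock routines below cost
 * nothing in a normal build.
 */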

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * be easily compatible with the kernel ABI because all kernel
 * objects regardless of machine architecture are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */
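
/*
 * Concretely (an illustrative sketch, not part of the interface):
 * the kernel-ABI lock object is used as a single pointer slot into
 * which the hypercall layer stores its heap-allocated counterpart.
 *
 *	kmutex_t km;			kernel-side storage, >= sizeof(void *)
 *	struct rumpuser_mtx *rmtx;	heap object owned by rumpuser
 *
 * rumpuser_mutex_init() in effect does
 *	*(struct rumpuser_mtx **)&km = rmtx;
 * and the RUMPMTX() accessor below reads the pointer back out:
 *	rmtx = *(struct rumpuser_mtx **)&km;
 */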

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

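/*
 * The rumpuser hypercalls return 0 on success or an errno value,
 * while the kernel try-routines return a boolean, hence the
 * "error == 0" translations below.
 */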
int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

/* reader/writer locks */

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

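/*
 * rw_tryupgrade() attempts to atomically turn a read hold into a
 * write hold.  On success the lockdebug records are rolled over: the
 * shared hold is marked released and an exclusive hold is acquired in
 * its place.  rw_downgrade() does the same in the opposite direction.
 */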
int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here; someone else might want the CPU to
		 * set a condition we are waiting for.  Otherwise
		 * we'd just loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
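
/*
 * Example of the tick conversion above: with hz = 100, a timeout of
 * 150 ticks becomes ts = { .tv_sec = 1, .tv_nsec = 500000000 }, i.e.
 * 1.5 seconds relative to now.  A timeout of 0 ticks means no timeout
 * at all.
 */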

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}