/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $
 */

#include "namespace.h"
#include <machine/tls.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)

umtx_t	_mutex_static_lock;

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

int __pthread_mutex_init(pthread_mutex_t *mutex,
	const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
	const struct timespec *abs_timeout);

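/*
 * Allocate and initialize a new mutex from the given attributes, or
 * from _pthread_mutexattr_default when mutex_attr is NULL.  A non-zero
 * 'private' marks the mutex MUTEX_FLAGS_PRIVATE, i.e. a libc-internal
 * mutex that _mutex_unlock_private() may force unlocked.
 */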
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

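/*
 * Perform the deferred initialization of a statically initialized
 * mutex on its first use.  _mutex_static_lock serializes racing
 * first-use initializations of the same mutex.
 */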
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

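/*
 * As init_static(), but the initialized mutex is marked private
 * (delete safe) for use within libc.
 */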
static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

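/*
 * Reset an already-allocated mutex to its initial unlocked state
 * without reallocating it.
 */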
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

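/*
 * Called in the child process after a fork().  Each mutex owned by
 * the forking thread has its umtx forced to the locked state so that
 * m_lock agrees with m_owner in the child.
 */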
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure.  One attempt is
		 * enough; if it fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, e.g. by a priority mutex type or by
		 * condition variables that reference it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

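/*
 * Common trylock path: attempt to take the umtx without blocking.
 * On success the mutex is appended to the calling thread's owned
 * mutex queue; if the caller already owns the mutex, the type
 * specific self-trylock handling decides the result.
 */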
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq,
		    m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);
	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread	*curthread = tls_get_curthread();
	int	ret = 0;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

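/*
 * Common lock path.  Try the umtx first; on contention either block
 * until the lock is handed over (abstime == NULL) or convert the
 * absolute timeout into a relative one and perform a timed wait.
 * Locking a mutex the caller already owns is delegated to
 * mutex_self_lock().
 */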
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	int	ret = 0;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq,
		    m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
				ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread,
				&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it is
			 * interrupted; restarting is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    m, m_qe);
		}
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	if (__predict_false(m == NULL))
		return (EINVAL);

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	if (__predict_false(m == NULL))
		return (EINVAL);
	return (mutex_unlock_common(m));
}

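/*
 * Trylock on a mutex the calling thread already owns: normal and
 * error-check mutexes fail with EBUSY, recursive mutexes bump the
 * lock count.
 */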
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

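/*
 * Lock a mutex the calling thread already owns.  Error-check mutexes
 * fail with EDEADLK (or sleep out the timeout and return ETIMEDOUT),
 * normal mutexes deadlock deliberately, and recursive mutexes bump
 * the lock count.
 */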
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to lock a mutex you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

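/*
 * Common unlock path.  A recursive mutex with a positive lock count
 * is merely decremented; otherwise the mutex is removed from the
 * owner's queue, ownership is cleared, and the umtx is released,
 * handing the mutex to the next waiting thread if there is one.
 */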
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

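/*
 * Re-acquire a mutex after a condition variable wait, restoring the
 * recursion count saved by _mutex_cv_unlock() and dropping the
 * reference taken there.
 */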
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	if ((ret = _pthread_mutex_lock(m)) == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

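/*
 * Release a mutex on entry to a condition variable wait.  The current
 * recursion count is handed back through *count, and a reference is
 * taken so the mutex cannot be destroyed while the wait is pending.
 */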
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}

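/*
 * Walk the given thread's owned mutex queue and unlock every mutex
 * that is marked private (MUTEX_FLAGS_PRIVATE).
 */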
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);