/*	$OpenBSD: rthread_mutex.c,v 1.5 2019/02/13 13:09:32 mpi Exp $ */
/*
 * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"
#include "synch.h"

/*
 * States defined in "Futexes Are Tricky" 5.2
 */
enum {
	UNLOCKED = 0,
	LOCKED = 1,	/* locked without waiter */
	CONTENDED = 2,	/* threads waiting for this mutex */
};
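
/*
 * Lock word transitions (after Drepper's "Futexes Are Tricky"):
 *
 *	lock:	UNLOCKED -> LOCKED	uncontended fast path (CAS)
 *		* -> CONTENDED		a locker is about to sleep
 *	unlock:	LOCKED -> UNLOCKED	nobody to wake
 *		CONTENDED -> UNLOCKED	wake one sleeping locker
 */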

#define SPIN_COUNT	128
#if defined(__i386__) || defined(__amd64__)
#define SPIN_WAIT()	asm volatile("pause": : : "memory")
#else
#define SPIN_WAIT()	do { } while (0)
#endif
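
/*
 * On x86 the "pause" instruction above hints to the CPU that this is
 * a spin-wait loop, which saves power and avoids a memory-order
 * mis-speculation penalty when the loop exits; other architectures
 * simply busy-loop.
 */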

static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
	pthread_mutex_t mutex;

	mutex = calloc(1, sizeof(*mutex));
	if (mutex == NULL)
		return (ENOMEM);

	if (attr == NULL) {
		mutex->type = PTHREAD_MUTEX_DEFAULT;
		mutex->prioceiling = -1;
	} else {
		mutex->type = (*attr)->ma_type;
		mutex->prioceiling = (*attr)->ma_protocol ==
		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
	}
	*mutexp = mutex;

	return (0);
}
DEF_STRONG(pthread_mutex_init);
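
/*
 * Example (illustrative sketch, not part of this file): creating a
 * recursive mutex through an attribute object.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */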

int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
	pthread_mutex_t mutex;

	if (mutexp == NULL || *mutexp == NULL)
		return (EINVAL);

	mutex = *mutexp;
	if (mutex->lock != UNLOCKED) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
		write(2, MSG, sizeof(MSG) - 1);
#undef MSG
		return (EBUSY);
	}
	free((void *)mutex);
	*mutexp = NULL;

	return (0);
}
DEF_STRONG(pthread_mutex_destroy);

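/*
 * Try once to grab the lock.  Returns 0 on success, EBUSY if another
 * thread holds the mutex, and the relock semantics of the mutex type
 * (EDEADLK, EBUSY, EAGAIN, ETIMEDOUT or abort()) when the calling
 * thread already owns it.
 */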
static int
_rthread_mutex_trylock(pthread_mutex_t mutex, int trywait,
    const struct timespec *abs)
{
	pthread_t self = pthread_self();

	if (atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED) == UNLOCKED) {
		membar_enter_after_atomic();
		mutex->owner = self;
		return (0);
	}

	if (mutex->owner == self) {
		int type = mutex->type;

		/* already the owner: apply the type's relock semantics */
		if (type != PTHREAD_MUTEX_RECURSIVE) {
			if (trywait || type == PTHREAD_MUTEX_ERRORCHECK)
				return (trywait ? EBUSY : EDEADLK);

			/* self-deadlock is disallowed by strict */
			if (type == PTHREAD_MUTEX_STRICT_NP && abs == NULL)
				abort();

			/*
			 * Self-deadlock: sleep on &mutex->type, a word
			 * that never changes, so only the absolute
			 * timeout (if any) can end the wait.
			 */
			while (_twait(&mutex->type, type, CLOCK_REALTIME,
			    abs) != ETIMEDOUT)
				;
			return (ETIMEDOUT);
		} else {
			if (mutex->count == INT_MAX)
				return (EAGAIN);
			mutex->count++;
			return (0);
		}
	}

	return (EBUSY);
}

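/*
 * Common lock path for pthread_mutex_lock(), pthread_mutex_trylock()
 * and pthread_mutex_timedlock(): try the fast path, spin briefly,
 * then sleep on the futex with the lock word set to CONTENDED.
 */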
static int
_rthread_mutex_timedlock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_mutex_t mutex;
	unsigned int i, lock;
	int error = 0;

	if (mutexp == NULL)
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * pthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			error = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (error != 0)
			return (EINVAL);
	}

	mutex = *mutexp;
	_rthread_debug(5, "%p: mutex_%slock %p (%p)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)mutex,
	    (void *)mutex->owner);

	error = _rthread_mutex_trylock(mutex, trywait, abs);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		if (mutex->lock == UNLOCKED)
			break;

		SPIN_WAIT();
	}

	lock = atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED);
	if (lock == UNLOCKED) {
		membar_enter_after_atomic();
		mutex->owner = self;
		return (0);
	}

	if (lock != CONTENDED) {
		/* Indicate that we're waiting on this mutex. */
		lock = atomic_swap_uint(&mutex->lock, CONTENDED);
	}

	while (lock != UNLOCKED) {
		error = _twait(&mutex->lock, CONTENDED, CLOCK_REALTIME, abs);
		if (error == ETIMEDOUT)
			return (error);
		/*
		 * We cannot know if there's another waiter, so when in
		 * doubt set the state to CONTENDED.
		 */
		lock = atomic_swap_uint(&mutex->lock, CONTENDED);
	}

	membar_enter_after_atomic();
	mutex->owner = self;
	return (0);
}

int
pthread_mutex_trylock(pthread_mutex_t *mutexp)
{
	return (_rthread_mutex_timedlock(mutexp, 1, NULL, 0));
}

int
pthread_mutex_timedlock(pthread_mutex_t *mutexp, const struct timespec *abs)
{
	return (_rthread_mutex_timedlock(mutexp, 0, abs, 1));
}

int
pthread_mutex_lock(pthread_mutex_t *mutexp)
{
	return (_rthread_mutex_timedlock(mutexp, 0, NULL, 0));
}
DEF_STRONG(pthread_mutex_lock);
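
/*
 * Example (illustrative sketch): the timespec passed to
 * pthread_mutex_timedlock() is an absolute CLOCK_REALTIME deadline,
 * not a relative interval.
 *
 *	struct timespec abs;
 *
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 1;
 *	if (pthread_mutex_timedlock(&m, &abs) == ETIMEDOUT)
 *		... give up after roughly one second ...
 */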

int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	pthread_mutex_t mutex;

	if (mutexp == NULL)
		return (EINVAL);

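	/*
	 * A statically initialized mutex that was never locked is
	 * still NULL; what unlocking it means then depends on the
	 * default mutex type.
	 */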
	if (*mutexp == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return (0);
#else
		abort();
#endif

	mutex = *mutexp;
	_rthread_debug(5, "%p: mutex_unlock %p (%p)\n", self, (void *)mutex,
	    (void *)mutex->owner);

	if (mutex->owner != self) {
		_rthread_debug(5, "%p: different owner %p (%p)\n", self,
		    (void *)mutex, (void *)mutex->owner);
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE) {
			return (EPERM);
		} else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

	if (mutex->type == PTHREAD_MUTEX_RECURSIVE) {
		if (mutex->count > 0) {
			mutex->count--;
			return (0);
		}
	}

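	/*
	 * Drop the lock: a LOCKED -> UNLOCKED transition means there
	 * were no waiters.  Anything else means we were CONTENDED, so
	 * force the word to UNLOCKED and wake one sleeping thread; it
	 * sets the word back to CONTENDED since it cannot know whether
	 * other waiters remain.
	 */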
	mutex->owner = NULL;
	membar_exit_before_atomic();
	if (atomic_dec_int_nv(&mutex->lock) != UNLOCKED) {
		mutex->lock = UNLOCKED;
		_wake(&mutex->lock, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);
287