xref: /qemu/util/qemu-thread-posix.c (revision 814bb12a)
1 /*
2  * Wrappers around mutex/cond/thread functions
3  *
4  * Copyright Red Hat, Inc. 2009
5  *
6  * Author:
7  *  Marcelo Tosatti <mtosatti@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  *
12  */
13 #include "qemu/osdep.h"
14 #ifdef __linux__
15 #include <sys/syscall.h>
16 #include <linux/futex.h>
17 #endif
18 #include "qemu/thread.h"
19 #include "qemu/atomic.h"
20 #include "qemu/notify.h"
21 
22 static bool name_threads;
23 
/* Enable or disable naming of threads subsequently created with
 * qemu_thread_create().  When the host lacks support, warn instead of
 * failing: naming is purely a debugging aid.
 */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
35 
/* Report a fatal error (@err is an errno-style code, @msg typically the
 * failing function's name) and abort the process.  Never returns.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
41 
42 void qemu_mutex_init(QemuMutex *mutex)
43 {
44     int err;
45 
46     err = pthread_mutex_init(&mutex->lock, NULL);
47     if (err)
48         error_exit(err, __func__);
49 }
50 
51 void qemu_mutex_destroy(QemuMutex *mutex)
52 {
53     int err;
54 
55     err = pthread_mutex_destroy(&mutex->lock);
56     if (err)
57         error_exit(err, __func__);
58 }
59 
60 void qemu_mutex_lock(QemuMutex *mutex)
61 {
62     int err;
63 
64     err = pthread_mutex_lock(&mutex->lock);
65     if (err)
66         error_exit(err, __func__);
67 }
68 
/* Try to acquire @mutex without blocking.  Returns 0 on success, or the
 * error code from pthread_mutex_trylock (EBUSY when already locked).
 */
int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}
73 
74 void qemu_mutex_unlock(QemuMutex *mutex)
75 {
76     int err;
77 
78     err = pthread_mutex_unlock(&mutex->lock);
79     if (err)
80         error_exit(err, __func__);
81 }
82 
83 void qemu_cond_init(QemuCond *cond)
84 {
85     int err;
86 
87     err = pthread_cond_init(&cond->cond, NULL);
88     if (err)
89         error_exit(err, __func__);
90 }
91 
92 void qemu_cond_destroy(QemuCond *cond)
93 {
94     int err;
95 
96     err = pthread_cond_destroy(&cond->cond);
97     if (err)
98         error_exit(err, __func__);
99 }
100 
101 void qemu_cond_signal(QemuCond *cond)
102 {
103     int err;
104 
105     err = pthread_cond_signal(&cond->cond);
106     if (err)
107         error_exit(err, __func__);
108 }
109 
110 void qemu_cond_broadcast(QemuCond *cond)
111 {
112     int err;
113 
114     err = pthread_cond_broadcast(&cond->cond);
115     if (err)
116         error_exit(err, __func__);
117 }
118 
119 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
120 {
121     int err;
122 
123     err = pthread_cond_wait(&cond->cond, &mutex->lock);
124     if (err)
125         error_exit(err, __func__);
126 }
127 
128 void qemu_sem_init(QemuSemaphore *sem, int init)
129 {
130     int rc;
131 
132 #if defined(__APPLE__) || defined(__NetBSD__)
133     rc = pthread_mutex_init(&sem->lock, NULL);
134     if (rc != 0) {
135         error_exit(rc, __func__);
136     }
137     rc = pthread_cond_init(&sem->cond, NULL);
138     if (rc != 0) {
139         error_exit(rc, __func__);
140     }
141     if (init < 0) {
142         error_exit(EINVAL, __func__);
143     }
144     sem->count = init;
145 #else
146     rc = sem_init(&sem->sem, 0, init);
147     if (rc < 0) {
148         error_exit(errno, __func__);
149     }
150 #endif
151 }
152 
153 void qemu_sem_destroy(QemuSemaphore *sem)
154 {
155     int rc;
156 
157 #if defined(__APPLE__) || defined(__NetBSD__)
158     rc = pthread_cond_destroy(&sem->cond);
159     if (rc < 0) {
160         error_exit(rc, __func__);
161     }
162     rc = pthread_mutex_destroy(&sem->lock);
163     if (rc < 0) {
164         error_exit(rc, __func__);
165     }
166 #else
167     rc = sem_destroy(&sem->sem);
168     if (rc < 0) {
169         error_exit(errno, __func__);
170     }
171 #endif
172 }
173 
/* Increment @sem and wake one waiter.  Aborts if the emulated counter
 * would overflow UINT_MAX, or on any pthread/sem error.
 */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        /* Counter saturated; report EINVAL after dropping the lock. */
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
197 
/* Fill *ts with the absolute wall-clock deadline @ms milliseconds from
 * now, with tv_nsec normalized into [0, 1e9).
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* The sum of two sub-second contributions is below 2e9, so a single
     * carry is enough to normalize. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
209 
/* Decrement @sem, waiting at most @ms milliseconds.
 * Returns 0 on success, -1 on timeout; any other failure aborts.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Unless we timed out, the loop exited with count > 0: consume one. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        /* Retry on signal interruption; sem_timedwait reports via errno. */
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    /* Any failure other than the timeout/EAGAIN cases above is fatal. */
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
257 
/* Decrement @sem, blocking indefinitely until that is possible.
 * Aborts on any error.
 */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    /* Retry on signal interruption. */
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
281 
#ifdef __linux__
/* Invoke the futex system call directly; @ev is passed as the futex word
 * (its first member must therefore be the value - see QemuEvent).
 */
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

/* Wake up to @n threads blocked in futex_wait on @ev. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/* Block while the futex word still equals @val.  EWOULDBLOCK means the
 * value already changed, so just return; EINTR retries.
 */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
/* Portable fallback: emulate futex wake/wait with the QemuEvent's
 * mutex/condvar pair.
 */
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

/* Sleep only if the value is still @val when checked under the lock. */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
324 
325 /* Valid transitions:
326  * - free->set, when setting the event
327  * - busy->set, when setting the event, followed by futex_wake
328  * - set->free, when resetting the event
329  * - free->busy, when waiting
330  *
331  * set->busy does not happen (it can be observed from the outside but
332  * it really is set->free->busy).
333  *
334  * busy->free provably cannot happen; to enforce it, the set->free transition
335  * is done with an OR, which becomes a no-op if the event has concurrently
336  * transitioned to free or busy.
337  */
338 
339 #define EV_SET         0
340 #define EV_FREE        1
341 #define EV_BUSY       -1
342 
/* Initialize @ev; it starts in the set (signalled) state when @init is
 * true, otherwise free.
 */
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    /* Non-Linux hosts emulate the futex with a mutex/condvar pair. */
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}
352 
/* Destroy @ev.  On Linux the futex needs no teardown; elsewhere release
 * the emulation mutex/condvar.
 */
void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
360 
/* Set @ev (free/busy -> set) and wake every waiter if there were any.
 * See the transition diagram above the EV_* constants.
 */
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}
374 
/* Reset @ev (set -> free).  A no-op when the event is already free or
 * busy; see the transition diagram above the EV_* constants.
 */
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
389 
/* Block until @ev is set.  Returns immediately if it already is;
 * otherwise advertise a waiter (free -> busy) before sleeping so that
 * qemu_event_set knows to call futex_wake.
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        /* Sleep until the value moves away from EV_BUSY. */
        futex_wait(ev, EV_BUSY);
    }
}
411 
/* TLS key whose destructor runs the departing thread's exit notifiers. */
static pthread_key_t exit_key;

/* Lets a NotifierList travel through the void * slot of
 * pthread_{get,set}specific; the build assertion below guarantees the
 * list head is pointer-sized.
 */
union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));
419 
/* Register @notifier to be invoked when the calling thread exits.  The
 * per-thread list head is stored directly in the TLS slot via the
 * NotifierThreadData union.
 */
void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}
427 
/* Unregister @notifier from the calling thread's exit notifier list.
 *
 * NOTE(review): ntd.list is a stack-local copy of the TLS list head;
 * this is only safe if notifier_remove() unlinks entirely through the
 * notifier's own link pointers and never dereferences the real head --
 * verify against the NotifierList implementation.
 */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}
435 
/* TLS destructor: @arg is the stored list head; fire all notifiers for
 * the exiting thread.
 */
static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}
441 
/* Create the TLS key at load time so every thread gets the destructor. */
static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}
446 
447 
448 /* Attempt to set the threads name; note that this is for debug, so
449  * we're not going to fail if we can't set it.
450  */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    /* Two-argument pthread_setname_np (glibc-style); presumably the
     * configure test behind CONFIG_PTHREAD_SETNAME_NP guarantees this
     * form -- on macOS, for instance, only the calling thread can be
     * named.  Return value deliberately ignored (best effort). */
    pthread_setname_np(thread->thread, name);
#endif
}
457 
458 void qemu_thread_create(QemuThread *thread, const char *name,
459                        void *(*start_routine)(void*),
460                        void *arg, int mode)
461 {
462     sigset_t set, oldset;
463     int err;
464     pthread_attr_t attr;
465 
466     err = pthread_attr_init(&attr);
467     if (err) {
468         error_exit(err, __func__);
469     }
470     if (mode == QEMU_THREAD_DETACHED) {
471         err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
472         if (err) {
473             error_exit(err, __func__);
474         }
475     }
476 
477     /* Leave signal handling to the iothread.  */
478     sigfillset(&set);
479     pthread_sigmask(SIG_SETMASK, &set, &oldset);
480     err = pthread_create(&thread->thread, &attr, start_routine, arg);
481     if (err)
482         error_exit(err, __func__);
483 
484     if (name_threads) {
485         qemu_thread_set_name(thread, name);
486     }
487 
488     pthread_sigmask(SIG_SETMASK, &oldset, NULL);
489 
490     pthread_attr_destroy(&attr);
491 }
492 
/* Store the calling thread's handle into @thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
497 
498 bool qemu_thread_is_self(QemuThread *thread)
499 {
500    return pthread_equal(pthread_self(), thread->thread);
501 }
502 
/* Terminate the calling thread; @retval becomes the value returned by
 * qemu_thread_join() on this thread.
 */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
507 
508 void *qemu_thread_join(QemuThread *thread)
509 {
510     int err;
511     void *ret;
512 
513     err = pthread_join(thread->thread, &ret);
514     if (err) {
515         error_exit(err, __func__);
516     }
517     return ret;
518 }
519