#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * so hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                            \
            QemuRecMutexTrylockFunc _f;                         \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func);    \
            _f(m, __FILE__, __LINE__);                          \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}
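
/*
 * Usage sketch (illustrative only; "lock" and "counter" are made-up names,
 * not part of this API): a QemuMutex protecting a shared counter.
 *
 *     static QemuMutex lock;
 *     static int counter;
 *
 *     void counter_setup(void)
 *     {
 *         qemu_mutex_init(&lock);
 *     }
 *
 *     void counter_inc(void)
 *     {
 *         qemu_mutex_lock(&lock);
 *         counter++;
 *         qemu_mutex_unlock(&lock);
 *     }
 *
 * qemu_mutex_trylock() is assumed to follow the usual convention of
 * returning 0 when the lock was acquired, so only unlock on a zero return.
 */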

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that qemu_cond_signal()
 * and qemu_cond_broadcast() work correctly unless they are called while
 * holding the same mutex that the corresponding qemu_cond_wait() callers
 * hold.
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}
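
/*
 * Usage sketch (illustrative; "lock", "cond" and "ready" are made-up names):
 * the classic condition-variable pattern of re-checking the predicate in a
 * loop, with the mutex held across the check and the wait.
 *
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     ... consume the data protected by lock ...
 *     qemu_mutex_unlock(&lock);
 *
 * The producer side sets the predicate and signals while holding the same
 * mutex, as required by the note above qemu_cond_signal():
 *
 *     qemu_mutex_lock(&lock);
 *     ready = true;
 *     qemu_cond_signal(&cond);
 *     qemu_mutex_unlock(&lock);
 */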

void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);
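
/*
 * Usage sketch (illustrative; "sem", enqueue_item() and dequeue_item() are
 * made-up names): a semaphore handing work items from a producer to a
 * consumer.  qemu_sem_timedwait() is assumed to return 0 when the semaphore
 * was decremented and non-zero on timeout; that convention is not documented
 * in this header.
 *
 *     QemuSemaphore sem;
 *     qemu_sem_init(&sem, 0);
 *
 *     // producer
 *     enqueue_item(item);
 *     qemu_sem_post(&sem);
 *
 *     // consumer
 *     qemu_sem_wait(&sem);
 *     item = dequeue_item();
 */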

void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);
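
/*
 * Usage sketch (illustrative; "done_ev" is a made-up name): a QemuEvent is a
 * binary flag that waiters block on until it is set; qemu_event_reset()
 * rearms it for the next round.
 *
 *     QemuEvent done_ev;
 *     qemu_event_init(&done_ev, false);   // start clear
 *
 *     // waiter
 *     qemu_event_wait(&done_ev);          // returns once the event is set
 *
 *     // signaller
 *     qemu_event_set(&done_ev);           // releases current and future waiters
 */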

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
                             unsigned long nbits);
int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
                             unsigned long *nbits);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);
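
/*
 * Usage sketch (illustrative; "worker", "my-worker" and "opaque" are made-up
 * names): create a joinable thread and collect its return value.  A detached
 * thread would pass QEMU_THREAD_DETACHED instead and must not be joined.
 *
 *     static void *worker(void *opaque)
 *     {
 *         ... do work on opaque ...
 *         return NULL;
 *     }
 *
 *     QemuThread thread;
 *     qemu_thread_create(&thread, "my-worker", worker, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     void *ret = qemu_thread_join(&thread);
 */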

struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);
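
/*
 * Usage sketch (illustrative; "exit_notifier" and "thread_exit_cb" are
 * made-up names): registering a per-thread Notifier whose callback runs when
 * the thread exits, e.g. to release other per-thread data.
 *
 *     static __thread Notifier exit_notifier;
 *
 *     static void thread_exit_cb(Notifier *n, void *unused)
 *     {
 *         ... free other per-thread data ...
 *     }
 *
 *     // early in the thread's start_routine
 *     exit_notifier.notify = thread_exit_cb;
 *     qemu_thread_atexit_add(&exit_notifier);
 */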

#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    qatomic_set(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_destroy(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy(spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    while (unlikely(qatomic_xchg(&spin->value, 1))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = qatomic_xchg(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    qatomic_store_release(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}
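
/*
 * Usage sketch (illustrative; "lock" and "stats" are made-up names): QemuSpin
 * is meant for short critical sections that never sleep while holding the
 * lock.  Note that qemu_spin_trylock() returns false when the lock was
 * acquired and true when it was busy.
 *
 *     static QemuSpin lock;
 *
 *     qemu_spin_init(&lock);
 *     ...
 *     qemu_spin_lock(&lock);
 *     stats.count++;
 *     qemu_spin_unlock(&lock);
 *
 *     if (!qemu_spin_trylock(&lock)) {
 *         stats.count++;
 *         qemu_spin_unlock(&lock);
 *     }
 */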

struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
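
/*
 * Usage sketch (illustrative; "xyz" and "xyz_lockcnt" are made-up names): the
 * typical reader side of a QemuLockCnt, bracketing a visit of the protected
 * data with an increment and a decrement of the counter.
 *
 *     qemu_lockcnt_inc(&xyz_lockcnt);
 *     if (xyz) {
 *         ... visit the data structure pointed to by xyz ...
 *     }
 *     qemu_lockcnt_dec(&xyz_lockcnt);
 */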

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
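
/*
 * Usage sketch (illustrative, continuing the made-up "xyz" example above): a
 * reader that may also free the data structure can combine the final
 * decrement with taking the lock, so that the free only happens once no
 * other reader is left.
 *
 *     if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) {
 *         g_free(xyz);
 *         xyz = NULL;
 *         qemu_lockcnt_unlock(&xyz_lockcnt);
 *     }
 */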

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
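
/*
 * Usage sketch (illustrative, again using the made-up "xyz" example): a
 * writer takes the lock and only tears the data structure down when the
 * count shows no concurrent visits.
 *
 *     qemu_lockcnt_lock(&xyz_lockcnt);
 *     if (!qemu_lockcnt_count(&xyz_lockcnt)) {
 *         g_free(xyz);
 *         xyz = NULL;
 *     }
 *     qemu_lockcnt_unlock(&xyz_lockcnt);
 */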

#endif