/*
 * Polymorphic locking functions (aka poor man's templates)
 *
 * Copyright Red Hat, Inc. 2017, 2018
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#ifndef QEMU_LOCKABLE_H
#define QEMU_LOCKABLE_H

#include "qemu/coroutine.h"
#include "qemu/thread.h"

typedef void QemuLockUnlockFunc(void *);

struct QemuLockable {
    void *object;
    QemuLockUnlockFunc *lock;
    QemuLockUnlockFunc *unlock;
};

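/* A QemuLockable bundles a lock object with the functions that lock and
 * unlock it, erasing the concrete lock type.  For a QemuMutex, for example,
 * @object points to the mutex while @lock and @unlock point to
 * qemu_mutex_lock() and qemu_mutex_unlock().
 */
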
/* This function produces an error if an invalid, non-NULL pointer type is
 * passed to QEMU_MAKE_LOCKABLE.  In optimized builds we can rely on the
 * compiler's dead-code elimination, so leaving the function undefined
 * reports the error at link time; otherwise it aborts at run time.
 */
#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
void unknown_lock_type(void *);
#else
static inline void unknown_lock_type(void *unused)
{
    abort();
}
#endif

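/* For example, a sketch of what happens with an unsupported pointer type
 * (the variable is illustrative only):
 *
 *   int i;
 *   QEMU_MAKE_LOCKABLE(&i);   <-- no QEMU_GENERIC case matches, so the
 *                                 lock/unlock functions resolve to
 *                                 unknown_lock_type: a link error in
 *                                 optimized builds, abort() at run time
 *                                 otherwise
 */
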
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
{
    /* We cannot test this in a macro, otherwise we get compiler
     * warnings like "the address of 'm' will always evaluate as 'true'".
     */
    return x ? lockable : NULL;
}

/* Auxiliary macros to simplify QEMU_MAKE_LOCKABLE.  */
#define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *)    \
    QEMU_GENERIC(x,                                  \
                 (QemuMutex *, qemu_mutex_lock),     \
                 (QemuRecMutex *, qemu_rec_mutex_lock), \
                 (CoMutex *, qemu_co_mutex_lock),    \
                 (QemuSpin *, qemu_spin_lock),       \
                 unknown_lock_type))

#define QEMU_UNLOCK_FUNC(x) ((QemuLockUnlockFunc *)  \
    QEMU_GENERIC(x,                                  \
                 (QemuMutex *, qemu_mutex_unlock),   \
                 (QemuRecMutex *, qemu_rec_mutex_unlock), \
                 (CoMutex *, qemu_co_mutex_unlock),  \
                 (QemuSpin *, qemu_spin_unlock),     \
                 unknown_lock_type))

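/* QEMU_GENERIC compares the static type of its first argument against each
 * (type, expression) pair and evaluates to the matching expression, falling
 * back to the last argument when nothing matches.  For instance, given
 * "QemuMutex m", QEMU_LOCK_FUNC(&m) evaluates to qemu_mutex_lock, cast to
 * the common QemuLockUnlockFunc * type.
 */
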
/* In C, compound literals have the lifetime of an automatic variable.
 * In C++ it would be different, but then C++ wouldn't need QemuLockable
 * either...
 */
#define QEMU_MAKE_LOCKABLE_(x) (&(QemuLockable) {    \
        .object = (x),                               \
        .lock = QEMU_LOCK_FUNC(x),                   \
        .unlock = QEMU_UNLOCK_FUNC(x),               \
    })

/* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex,
 *     QemuSpin) or an existing QemuLockable *.
 *
 * Returns a QemuLockable object that can be passed around
 * to a function that can operate with locks of any kind, or
 * NULL if @x is %NULL.
 */
#define QEMU_MAKE_LOCKABLE(x)                        \
    QEMU_GENERIC(x,                                  \
                 (QemuLockable *, (x)),              \
                 qemu_make_lockable((x), QEMU_MAKE_LOCKABLE_(x)))

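/* A usage sketch (the variable names are illustrative only):
 *
 *   QemuMutex *mutex = ...;                      <-- may be NULL
 *   QemuLockable *l = QEMU_MAKE_LOCKABLE(mutex); <-- NULL if mutex is NULL
 *
 *   l = QEMU_MAKE_LOCKABLE(l);                   <-- already a QemuLockable *,
 *                                                    so (x) is returned as-is
 */
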
/* QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex,
 *     QemuSpin) or an existing QemuLockable *; it must not be NULL.
 *
 * Returns a QemuLockable object that can be passed around
 * to a function that can operate with locks of any kind.
 */
#define QEMU_MAKE_LOCKABLE_NONNULL(x)                \
    QEMU_GENERIC(x,                                  \
                 (QemuLockable *, (x)),              \
                 QEMU_MAKE_LOCKABLE_(x))

static inline void qemu_lockable_lock(QemuLockable *x)
{
    x->lock(x->object);
}

static inline void qemu_lockable_unlock(QemuLockable *x)
{
    x->unlock(x->object);
}

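/* These accessors are what generic code calls on a QemuLockable it was
 * handed.  A hypothetical consumer (the function name is illustrative only):
 *
 *   void critical_section(QemuLockable *lockable)
 *   {
 *       qemu_lockable_lock(lockable);
 *       ...                          <-- works for any supported lock type
 *       qemu_lockable_unlock(lockable);
 *   }
 */
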
static inline QemuLockable *qemu_lockable_auto_lock(QemuLockable *x)
{
    qemu_lockable_lock(x);
    return x;
}

static inline void qemu_lockable_auto_unlock(QemuLockable *x)
{
    if (x) {
        qemu_lockable_unlock(x);
    }
}

G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)

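/* G_DEFINE_AUTOPTR_CLEANUP_FUNC is the GLib macro behind g_autoptr(): it
 * arranges for qemu_lockable_auto_unlock() to run automatically when a
 * g_autoptr(QemuLockable) variable goes out of scope, however the scope
 * is left.
 */
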
#define WITH_QEMU_LOCK_GUARD_(x, var) \
    for (g_autoptr(QemuLockable) var = \
                qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE_NONNULL((x))); \
         var; \
         qemu_lockable_auto_unlock(var), var = NULL)

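/* The for loop above runs its body exactly once: the declaration locks @x
 * and binds @var to a non-NULL QemuLockable, and after the body finishes
 * the third expression unlocks and clears @var, so the condition fails and
 * the g_autoptr cleanup sees NULL (avoiding a double unlock).  If the body
 * is left early through break or return, @var is still non-NULL and the
 * cleanup function qemu_lockable_auto_unlock() releases the lock instead.
 */
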
/**
 * WITH_QEMU_LOCK_GUARD - Lock a lock object for scope
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex,
 *     QemuSpin).
 *
 * This macro defines a lock scope such that entering the scope takes the lock
 * and leaving the scope releases the lock.  Return statements are allowed
 * within the scope and release the lock.  Break and continue statements leave
 * the scope early and release the lock.
 *
 *   WITH_QEMU_LOCK_GUARD(&mutex) {
 *       ...
 *       if (error) {
 *           return; <-- mutex is automatically unlocked
 *       }
 *
 *       if (early_exit) {
 *           break;  <-- leave this scope early
 *       }
 *       ...
 *   }
 */
/* glue() is used so that __COUNTER__ is macro-expanded before pasting;
 * writing qemu_lockable_auto##__COUNTER__ would paste the literal token
 * __COUNTER__ and give every guard the same variable name.
 */
#define WITH_QEMU_LOCK_GUARD(x) \
    WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))

/**
 * QEMU_LOCK_GUARD - Lock an object until the end of the scope
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex,
 *     QemuSpin).
 *
 * This macro takes a lock until the end of the scope.  Return statements
 * release the lock.
 *
 *   ... <-- mutex not locked
 *   QEMU_LOCK_GUARD(&mutex); <-- mutex locked from here onwards
 *   ...
 *   if (error) {
 *       return; <-- mutex is automatically unlocked
 *   }
 */
#define QEMU_LOCK_GUARD(x) \
    g_autoptr(QemuLockable) glue(qemu_lockable_auto, __COUNTER__) = \
            qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))

#endif