/*
 * QEMU coroutines
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *  Kevin Wolf         <kwolf@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "block/aio.h"

/**
 * The minimal batch size is always 64: coroutines from the release_pool are
 * only reused once more than 64 coroutines have accumulated in it.  The
 * maximum pool size starts at 64 and is increased on demand so that
 * coroutines are not deleted even if they are not immediately reused.
 */
enum {
    POOL_MIN_BATCH_SIZE = 64,
    POOL_INITIAL_MAX_SIZE = 64,
};
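
/*
 * Terminated coroutines are recycled through two levels of pools: a global
 * release_pool that may grow to twice pool_max_size, and a per-thread
 * alloc_pool capped at pool_max_size (see coroutine_delete() below).
 * pool_max_size starts at POOL_INITIAL_MAX_SIZE and is adjusted at run time
 * with qemu_coroutine_inc_pool_size()/qemu_coroutine_dec_pool_size().
 */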

/** Free list to speed up creation */
static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
static unsigned int pool_max_size = POOL_INITIAL_MAX_SIZE;
static unsigned int release_pool_size;

typedef QSLIST_HEAD(, Coroutine) CoroutineQSList;
QEMU_DEFINE_STATIC_CO_TLS(CoroutineQSList, alloc_pool);
QEMU_DEFINE_STATIC_CO_TLS(unsigned int, alloc_pool_size);
QEMU_DEFINE_STATIC_CO_TLS(Notifier, coroutine_pool_cleanup_notifier);
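
/*
 * The shared release_pool is only manipulated with atomic list operations
 * because any thread may touch it; each thread's alloc_pool lives in
 * coroutine-TLS and needs no synchronization.  The cleanup notifier below
 * frees a thread's alloc_pool when that thread exits.
 */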

static void coroutine_pool_cleanup(Notifier *n, void *value)
{
    Coroutine *co;
    Coroutine *tmp;
    CoroutineQSList *alloc_pool = get_ptr_alloc_pool();

    QSLIST_FOREACH_SAFE(co, alloc_pool, pool_next, tmp) {
        QSLIST_REMOVE_HEAD(alloc_pool, pool_next);
        qemu_coroutine_delete(co);
    }
}

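/*
 * Create a coroutine that will run entry(opaque) once it is entered.  The
 * fast path pops a recycled coroutine off this thread's alloc_pool; when that
 * is empty, a whole batch is stolen from the global release_pool; only if
 * both are empty is a fresh coroutine allocated.
 *
 * Minimal usage sketch (illustrative only; my_co_fn and my_state are
 * hypothetical names, not part of QEMU):
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *     qemu_coroutine_enter(co);
 */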
Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
{
    Coroutine *co = NULL;

    if (CONFIG_COROUTINE_POOL) {
        CoroutineQSList *alloc_pool = get_ptr_alloc_pool();

        co = QSLIST_FIRST(alloc_pool);
        if (!co) {
            if (release_pool_size > POOL_MIN_BATCH_SIZE) {
                /* Slow path; a good place to register the destructor, too.  */
                Notifier *notifier = get_ptr_coroutine_pool_cleanup_notifier();
                if (!notifier->notify) {
                    notifier->notify = coroutine_pool_cleanup;
                    qemu_thread_atexit_add(notifier);
                }

                /* This is not exact; there could be a little skew between
                 * release_pool_size and the actual size of release_pool.  But
                 * it is just a heuristic; it does not need to be perfect.
                 */
                set_alloc_pool_size(qatomic_xchg(&release_pool_size, 0));
                QSLIST_MOVE_ATOMIC(alloc_pool, &release_pool);
                co = QSLIST_FIRST(alloc_pool);
            }
        }
        if (co) {
            QSLIST_REMOVE_HEAD(alloc_pool, pool_next);
            set_alloc_pool_size(get_alloc_pool_size() - 1);
        }
    }

    if (!co) {
        co = qemu_coroutine_new();
    }

    co->entry = entry;
    co->entry_arg = opaque;
    QSIMPLEQ_INIT(&co->co_queue_wakeup);
    return co;
}

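/*
 * Recycle a terminated coroutine into one of the pools, or destroy it if
 * both pools are already full.  Called from qemu_aio_coroutine_enter() when
 * a coroutine's entry function returns.
 */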
static void coroutine_delete(Coroutine *co)
{
    co->caller = NULL;

    if (CONFIG_COROUTINE_POOL) {
        if (release_pool_size < qatomic_read(&pool_max_size) * 2) {
            QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
            qatomic_inc(&release_pool_size);
            return;
        }
        if (get_alloc_pool_size() < qatomic_read(&pool_max_size)) {
            QSLIST_INSERT_HEAD(get_ptr_alloc_pool(), co, pool_next);
            set_alloc_pool_size(get_alloc_pool_size() + 1);
            return;
        }
    }

    qemu_coroutine_delete(co);
}

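/*
 * Transfer control to co within AioContext ctx, then keep running coroutines
 * queued on co_queue_wakeup until none are pending.  Entering a coroutine
 * that is already active, or that has been scheduled with aio_co_schedule(),
 * is a bug and aborts.
 */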
void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
{
    QSIMPLEQ_HEAD(, Coroutine) pending = QSIMPLEQ_HEAD_INITIALIZER(pending);
    Coroutine *from = qemu_coroutine_self();

    QSIMPLEQ_INSERT_TAIL(&pending, co, co_queue_next);

    /* Run co and any queued coroutines */
    while (!QSIMPLEQ_EMPTY(&pending)) {
        Coroutine *to = QSIMPLEQ_FIRST(&pending);
        CoroutineAction ret;

        /* We cannot rely on the read barrier for to in aio_co_wake(), as
         * there are callers outside of aio_co_wake().  */
        const char *scheduled = qatomic_mb_read(&to->scheduled);

        QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next);

        trace_qemu_aio_coroutine_enter(ctx, from, to, to->entry_arg);

        /* If the coroutine has already been scheduled, entering it here as
         * well would enter it twice, potentially even after the coroutine
         * has been deleted.  */
        if (scheduled) {
            fprintf(stderr,
                    "%s: Co-routine was already scheduled in '%s'\n",
                    __func__, scheduled);
            abort();
        }

        if (to->caller) {
            fprintf(stderr, "Co-routine re-entered recursively\n");
            abort();
        }

        to->caller = from;
        to->ctx = ctx;

        /* Store to->ctx before anything that stores to.  Matches
         * barrier in aio_co_wake and qemu_co_mutex_wake.
         */
        smp_wmb();

        ret = qemu_coroutine_switch(from, to, COROUTINE_ENTER);

        /* Queued coroutines are run depth-first; previously pending coroutines
         * run after those queued more recently.
         */
        QSIMPLEQ_PREPEND(&pending, &to->co_queue_wakeup);

        switch (ret) {
        case COROUTINE_YIELD:
            break;
        case COROUTINE_TERMINATE:
            assert(!to->locks_held);
            trace_qemu_coroutine_terminate(to);
            coroutine_delete(to);
            break;
        default:
            abort();
        }
    }
}
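
/*
 * Illustration of the depth-first ordering above (coroutine names are
 * hypothetical): if A wakes B and then C before yielding, the pending list
 * becomes B, C; if B then wakes D, B's wakeup queue is prepended, so the
 * order is:
 *
 *     enter(A) -> run B -> run D -> run C
 */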

void qemu_coroutine_enter(Coroutine *co)
{
    qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);
}

void qemu_coroutine_enter_if_inactive(Coroutine *co)
{
    if (!qemu_coroutine_entered(co)) {
        qemu_coroutine_enter(co);
    }
}

void coroutine_fn qemu_coroutine_yield(void)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *to = self->caller;

    trace_qemu_coroutine_yield(self, to);

    if (!to) {
        fprintf(stderr, "Co-routine is yielding to no one\n");
        abort();
    }

    self->caller = NULL;
    qemu_coroutine_switch(self, to, COROUTINE_YIELD);
}
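
/*
 * Typical yield pattern (illustrative only; schedule_my_callback and my_cb
 * are hypothetical): arrange to be woken up later, then yield.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         aio_co_wake(opaque);        // re-enters the coroutine
 *     }
 *
 *     // inside a coroutine_fn:
 *     schedule_my_callback(my_cb, qemu_coroutine_self());
 *     qemu_coroutine_yield();         // execution resumes here after my_cb
 */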

bool qemu_coroutine_entered(Coroutine *co)
{
    return co->caller;
}

AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
{
    return co->ctx;
}

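/*
 * Adjust the run-time pool limit.  Code that creates many coroutines at once
 * can raise the limit up front and lower it when done, e.g. (illustrative,
 * hypothetical device):
 *
 *     realize:   qemu_coroutine_inc_pool_size(queue_size);
 *     unrealize: qemu_coroutine_dec_pool_size(queue_size);
 */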
void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size)
{
    qatomic_add(&pool_max_size, additional_pool_size);
}

void qemu_coroutine_dec_pool_size(unsigned int removing_pool_size)
{
    qatomic_sub(&pool_max_size, removing_pool_size);
}